diff --git a/.github/workflows/agent_engine_build.yml b/.github/workflows/agent_engine_build.yml index 72a729d02..601c00bfa 100644 --- a/.github/workflows/agent_engine_build.yml +++ b/.github/workflows/agent_engine_build.yml @@ -90,7 +90,15 @@ jobs: rustup update --no-self-update stable rustup target add ${{ matrix.target }} rustup component add rust-src - + + - name: Install LLVM (Windows) + if: startsWith(matrix.os, 'windows') + run: choco install llvm -y + + - name: Install LLVM (macOS) + if: startsWith(matrix.os, 'macos') + run: brew install llvm + - name: setup cross-rs if: matrix.cross run: | diff --git a/.github/workflows/agent_engine_release.yml b/.github/workflows/agent_engine_release.yml index d364cde03..528271e72 100644 --- a/.github/workflows/agent_engine_release.yml +++ b/.github/workflows/agent_engine_release.yml @@ -78,6 +78,14 @@ jobs: rustup target add ${{ matrix.target }} rustup component add rust-src + - name: Install LLVM (Windows) + if: startsWith(matrix.os, 'windows') + run: choco install llvm -y + + - name: Install LLVM (macOS) + if: startsWith(matrix.os, 'macos') + run: brew install llvm + - name: setup cross-rs if: matrix.cross run: | diff --git a/.gitignore b/.gitignore index 03622ad00..13599bba4 100644 --- a/.gitignore +++ b/.gitignore @@ -294,3 +294,6 @@ dist .vite # Refact binary/symlink **/refact/bin/refact-lsp + +.refact_knowledge*/ +.refact*/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b710aa1fc..47af2e184 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -104,7 +104,7 @@ chat_models: supports_agent: true tokenizer: hf://your-tokenizer-path ``` -for more info about which config needs to be set up, you can see known_models.json +for more info about which config needs to be set up, you can see completion_presets.json and embedding_presets.json ### Step 2: Test the Model diff --git a/refact-agent/engine/AGENTS.md b/refact-agent/engine/AGENTS.md new file mode 100644 index 000000000..d49930960 --- /dev/null +++ b/refact-agent/engine/AGENTS.md @@ -0,0 +1,145 @@ +# Refact Agent Engine + +Binary: `refact-lsp` — AI coding agent, HTTP + LSP server. Rust 2021 edition, async/tokio. + +## Stack + +Axum (HTTP), tower-lsp (LSP), tree-sitter (AST), SQLite + vec0 (VecDB), LMDB/Heed (AST store), git2, headless_chrome, whisper-rs (optional, feature-gated), rmcp (MCP). + +## Build + +```bash +cargo build --release # binary at target/release/refact-lsp +cargo build --release --features voice # with Whisper transcription +cargo test --lib && cargo test --doc +``` + +Release profile: `opt-level = "z"`, `lto = true`, `strip = true`, `codegen-units = 1`. + +## Architecture + +`GlobalContext` (`Arc>`) is the central shared state. HTTP server (Axum) and LSP server (tower-lsp) both hold a reference. Background tasks (AST indexer, VecDB, git shadow cleanup, telemetry, knowledge graph, trajectory memos, agent monitor, OAuth refresh) are spawned via `start_background_tasks()` (~12 tokio tasks). + +### Source Layout + +``` +src/ + main.rs — entry point, CLI (--http-port, --lsp-stdin-stdout, --ast, --vecdb, etc.) + global_context.rs — SharedGlobalContext + lsp.rs — tower-lsp LanguageServer impl + http/routers/v1/ — 27+ endpoint modules + chat/ — 22+ files, ~15K LOC (session, queue, generation, tools, trajectories, linearize, stream_core, etc.) 
+ llm/ — LLM adapters (OpenAI, Anthropic wire formats), streaming + tools/ — 50+ tools (file_edit/, search, web, shell, subagent, knowledge, tasks) + ast/ — tree-sitter indexing, 7 parsers (C/C++, Python, Java, Kotlin, JS, Rust, TS) + vecdb/ — SQLite vec0 semantic search + providers/ — 15+ LLM providers (Anthropic, OpenAI, Codex, Refact, DeepSeek, Gemini, Groq, LM Studio, Ollama, OpenRouter, vLLM, xAI, Claude Code, custom) + integrations/ — GitHub, GitLab, Bitbucket, Chrome, PostgreSQL, MySQL, Docker, PDB, cmdline, services, MCP (stdio+SSE) + knowledge_graph/ — petgraph DiGraph, builder/cleanup/staleness/query + scratchpads/ — FIM code completion (PSM/SPM), RAG, multimodality + tasks/ — Kanban task board (planning/active/paused/completed/abandoned) + caps/ — model capabilities resolution, cloud/local detection + git/ — shadow repos, checkpoints + voice/ — Whisper transcription, streaming sessions + telemetry/ — usage tracking + yaml_configs/ — defaults for modes, providers, toolbox commands, prompts + postprocessing/ — token-aware truncation, AST prioritization + agentic/ — commit messages, agentic edit flows +``` + +## Chat System + +### Session State Machine + +`SessionState` enum: `Idle`, `Generating`, `ExecutingTools`, `Paused`, `WaitingIde`, `WaitingUserInput`, `Completed`, `Error`. + +### Modes + +| Mode | Purpose | +|------|---------| +| `NO_TOOLS` | Plain chat | +| `EXPLORE` | Context gathering with quick tools | +| `AGENT` | Autonomous task execution, full toolset | +| `TASK_PLANNER` | Kanban board management | +| `TASK_AGENT` | Execute task cards | + +### SSE Events + +Subscribe: `GET /v1/chats/subscribe?chat_id={id}`. Events have monotonic `seq: u64`. + +Key types: `Snapshot`, `StreamStarted`, `StreamDelta`, `StreamFinished`, `MessageAdded`, `MessageUpdated`, `MessageRemoved`, `MessagesTruncated`, `ThreadUpdated`, `QueueUpdated`, `RuntimeUpdated`, `PauseRequired`. + +### Commands + +`POST /v1/chats/{chat_id}/commands` — queued processing. + +Variants: `UserMessage`, `SetParams`, `UpdateMessage`, `RemoveMessage`, `TruncateMessages`, `RetryFromIndex`, `Abort`, `ApproveTools`, `RejectTools`, `BranchFromChat`, `RestoreFromTrajectory`, `ClearDraft`, `SetDraft`, `Regenerate`. + +### Delta Operations + +`AppendContent`, `AppendReasoning`, `SetToolCalls`, `SetThinkingBlocks`, `AddCitation`, `AddServerContentBlock`, `SetUsage`, `MergeExtra`. + +### Message Flow + +``` +UserMessage → queue → prepare (system prompt, knowledge RAG, history limit) → linearize → LLM stream → StreamCollector → tool calls → loop +``` + +- **`linearize.rs`**: merges consecutive user messages, strips thinking blocks for LLM cache compatibility. +- **`stream_core.rs`**: `merge_thinking_blocks()` — deduplicates by (type,index) → (type,id) → (type,signature); signatures are opaque, latest-wins replacement. +- **`history_limit.rs`**: 4-stage compression (dedup context files → compress tool results → fix tool calls → limit history). `CompressionStrength`: Absent/Low/Medium/High. + +### Anthropic Thinking/Signatures + +Thinking blocks with cryptographic signatures must be preserved verbatim — no JSON rebuilding, no field reordering. Signatures validate exact prior content-block sequence. During streaming, accumulate deltas preserving metadata (block_index, signature) separately from text. For multi-provider chats, strip provider-specific blocks (thinking/signatures) on model switch. `strip_thinking_blocks_if_disabled()` in prepare.rs removes them when model lacks reasoning support. 
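A minimal sketch of that latest-wins dedup, assuming illustrative struct and field names (the engine's real types live in the chat module, not here):

```rust
// Illustrative sketch only — the real merge_thinking_blocks() is in stream_core.rs
// and works on the engine's own block types.
struct ThinkingBlock {
    kind: String,              // block "type", e.g. "thinking"
    index: Option<u32>,        // stream block index, when the provider sends one
    id: Option<String>,        // provider-assigned id, when present
    signature: Option<String>, // opaque — must be carried over byte-for-byte
    text: String,
}

fn merge_thinking_blocks(mut acc: Vec<ThinkingBlock>, incoming: Vec<ThinkingBlock>) -> Vec<ThinkingBlock> {
    for nb in incoming {
        // Dedup keys tried in priority order: (type,index) → (type,id) → (type,signature).
        let hit = acc.iter().position(|b| b.kind == nb.kind && nb.index.is_some() && b.index == nb.index)
            .or_else(|| acc.iter().position(|b| b.kind == nb.kind && nb.id.is_some() && b.id == nb.id))
            .or_else(|| acc.iter().position(|b| b.kind == nb.kind && nb.signature.is_some() && b.signature == nb.signature));
        match hit {
            Some(i) => acc[i] = nb, // latest wins: the earlier partial block is replaced wholesale
            None => acc.push(nb),
        }
    }
    acc
}
```

Signatures are never inspected or rebuilt — replacing the whole block is what keeps them valid against the exact content-block sequence the provider signed.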
+ +### Trajectories + +Stored: `.refact/trajectories/{chat_id}.json`. Atomic writes (`.tmp` → rename). Rich JSON: id, title, model, mode, tool_use, messages, task_meta, version, created_at, reasoning_effort, checkpoints_enabled, parent_id, root_chat_id, etc. + +OpenAI conversion lives in `src/llm/adapters/openai_chat.rs` (`convert_messages_to_openai()`). + +## Tools + +~50+ tools, filtered by mode/capabilities/config. Registered in `tools_list.rs`. + +**Categories**: Codebase search (AST defs, tree, cat, regex, semantic) · Codebase change (create/update/rm/mv/undo/apply_patch — confirmation required) · Web (fetch, search, Chrome automation) · System (shell, cmdline_*, service_*) · Knowledge (search, create, trajectories) · Agent (subagent, strategic_planning, deep_research, code_review) · Task management (~18 tools) · IDE (open_file, paste_text) · Integration-defined + MCP tools. + +Tool trait: `tool_execute(&mut self, ccx, tool_call_id, args) -> Result<(bool, Vec)>`. + +`AtCommandsContext` provides: global_context, chat_id, n_ctx, abort_flag, messages, current_model, task_meta, subchat depth/channels, postprocess params. + +## HTTP API + +Base: `http://127.0.0.1:{port}/v1/`. Middleware: permissive CORS, 15MB body limit, telemetry logging. + +Key endpoints: `/ping`, `/caps`, `/graceful-shutdown`, `/chats/{id}/commands`, `/chats/subscribe`, `/chat` (legacy), `/code-completion`, `/code-lens`, `/tools`, `/tools-check-if-confirmation-needed`, `/ast-file-symbols`, `/ast-status`, `/rag-status`, `/vecdb-search`, `/git-commit`, `/checkpoints-preview`, `/checkpoints-restore`, `/integrations`, `/integration-get`, `/integration-save`, `/knowledge/update-memory`, `/knowledge/delete-memory`, `/knowledge-graph`, `/voice/transcribe`, `/voice/stream/{id}`, `/voice/stream/{id}/chunk`, `/telemetry-network`, `/snippet-accepted`. + +## AST + +8 languages: C, C++, Python, Java, Kotlin, JavaScript, Rust, TypeScript (7 tree-sitter parsers; C/C++ share parser). Two-phase indexing: parse+store → link cross-references. Storage in LMDB with key prefixes (`d|` defs, `c|` fuzzy lookup, `u|` back-links, `classes|` inheritance). Background thread with batch processing. Skeletonizer generates abbreviated code for embeddings. + +## VecDB + +SQLite + vec0 extension. File splitters: trajectory JSON (4 msgs/chunk), Markdown (heading-aware), code (AST-aware token windows). Embedding via external HTTP API with batching/retry. Search: cosine KNN → reject threshold → normalize usefulness score. Background thread: enqueue → split → cache check → embed → store. Cleanup: keep 10 newest tables, drop >7 days. + +## Providers + +15+ providers in `src/providers/`: Anthropic, Claude Code, OpenAI, Codex, Refact, DeepSeek, Google Gemini, Groq, LM Studio, Ollama, OpenRouter, vLLM, xAI, custom. Each defines ProviderDefaults (chat/completion/embedding models). OAuth support for Codex/Claude Code. YAML configs in `yaml_configs/default_providers/`. + +## Integrations + +GitHub, GitLab, Bitbucket, Chrome (headless), PostgreSQL, MySQL, Docker, PDB, shell, cmdline_* (one-off), service_* (long-running), MCP (stdio + SSE). Config: `.refact/integrations/*.yaml`. Trait: `integr_tools()`, `integr_schema()`, `integr_settings_apply()`. + +## Testing + +- **Python integration tests** (~38 files in `tests/`): live HTTP+SSE against running server. 7 `test_chat_session_*.py` files. +- **Rust unit tests**: `src/chat/tests.rs`, AST parser tests, 50+ modules. `cargo test --lib`. 
+- **Test data**: `tests/emergency_frog_situation/` — themed frog simulations for parsing edge cases. + +## Config + +- **User**: `~/.config/refact/` (default_privacy.yaml, providers.d/*.yaml) +- **Cache**: `~/.cache/refact/` (shadow repos, logs, telemetry, integrations) +- **Project**: `.refact/` (trajectories/, knowledge/, tasks/, integrations/) +- **System prompts**: `yaml_configs/defaults/` — modes, subagents, toolbox commands. Magic vars: `%ARGS%`, `%CODE_SELECTION%`, `%WORKSPACE_INFO%`, `%PROJECT_TREE%`. diff --git a/refact-agent/engine/Cargo.toml b/refact-agent/engine/Cargo.toml index 49158c5f3..59a36425b 100644 --- a/refact-agent/engine/Cargo.toml +++ b/refact-agent/engine/Cargo.toml @@ -6,9 +6,13 @@ lto = true [package] name = "refact-lsp" -version = "0.10.30" +version = "7.0.1" edition = "2021" build = "build.rs" + +[features] +default = ["voice"] +voice = ["dep:whisper-rs", "dep:symphonia", "dep:symphonia-bundle-mp3", "dep:rubato"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [build-dependencies] @@ -18,6 +22,7 @@ shadow-rs = "1.1.0" winreg = "0.55.0" [dependencies] + astral-tokio-tar = "0.5.2" axum = { version = "0.6.20", features = ["default", "http2"] } async-stream = "0.3.5" @@ -38,11 +43,13 @@ heed = "0.22.0" home = "0.5" hostname = "0.4" html2text = "0.12.5" +humantime = "2.1" hyper = { version = "0.14", features = ["server", "stream"] } image = "0.25.2" indexmap = { version = "1.9.1", features = ["serde-1"] } itertools = "0.14.0" lazy_static = "1.4.0" +libc = "0.2" libsqlite3-sys = "0.28.0" log = "0.4.20" md5 = "0.7" @@ -55,6 +62,7 @@ rayon = "1.8.0" regex = "1.9.5" reqwest = { version = "0.12", default-features = false, features = ["json", "stream", "rustls-tls-webpki-roots", "charset", "http2"] } reqwest-eventsource = "0.6.0" +eventsource-stream = "0.2" resvg = "0.44.0" ropey = "1.6" rusqlite = { version = "0.31.0", features = ["bundled"] } @@ -67,6 +75,7 @@ serde_yaml = "0.9.31" # all features = ["compression", "docs", "event_log", "failpoints", "io_uring", "lock_free_delays", "measure_allocs", "miri_optimizations", "mutex", "no_inline", "no_logs", "pretty_backtrace", "testing"] shadow-rs = { version = "1.1.0", features = [], default-features = false } sha2 = "0.10.8" +hex = "0.4.3" shell-words = "1.1.0" shell-escape = "0.1.5" select = "0.6.0" @@ -105,4 +114,16 @@ zerocopy = "0.8.14" # There you can use a local copy # rmcp = { path = "../../../rust-sdk/crates/rmcp/", "features" = ["client", "transport-child-process", "transport-sse"] } rmcp = { git = "https://github.com/smallcloudai/rust-sdk", branch = "main", features = ["client", "transport-child-process", "transport-sse-client", "reqwest"] } -thiserror = "2.0.12" \ No newline at end of file +thiserror = "2.0.12" +dirs = "5.0" +whisper-rs = { version = "0.12", optional = true } +symphonia = { version = "0.5", default-features = false, features = ["wav", "ogg", "pcm", "vorbis"], optional = true } +symphonia-bundle-mp3 = { version = "0.5", optional = true } +rubato = { version = "0.15", optional = true } + +[dev-dependencies] +tempfile = "3.8" +proptest = "1.4" +insta = "1.34" +uuid = { version = "1.6", features = ["v4", "serde"] } +chrono = { version = "0.4", features = ["serde"] } diff --git a/refact-agent/engine/Cross.toml b/refact-agent/engine/Cross.toml index 90b2da6b7..a61d477e5 100644 --- a/refact-agent/engine/Cross.toml +++ b/refact-agent/engine/Cross.toml @@ -1,7 +1,7 @@ [target.aarch64-unknown-linux-gnu] pre-build = [ "dpkg --add-architecture arm64", - 
"apt-get update && apt-get install --assume-yes libssl-dev:arm64 curl unzip", + "apt-get update && apt-get install --assume-yes libssl-dev:arm64 libclang-dev curl unzip", ] [target.aarch64-unknown-linux-gnu.env] passthrough = [ @@ -12,7 +12,7 @@ passthrough = [ [target.x86_64-unknown-linux-gnu] pre-build = [ - "apt-get update && apt-get install --assume-yes libssl-dev curl unzip", + "apt-get update && apt-get install --assume-yes libssl-dev libclang-dev curl unzip", ] [target.x86_64-unknown-linux-gnu.env] passthrough = [ diff --git a/refact-agent/engine/build.rs b/refact-agent/engine/build.rs index cfe1015ca..1b74ba6ba 100644 --- a/refact-agent/engine/build.rs +++ b/refact-agent/engine/build.rs @@ -1,4 +1,3 @@ - fn main() { shadow_rs::ShadowBuilder::builder().build().unwrap(); } diff --git a/refact-agent/engine/src/agentic/compress_trajectory.rs b/refact-agent/engine/src/agentic/compress_trajectory.rs index 68423cdee..4602c5888 100644 --- a/refact-agent/engine/src/agentic/compress_trajectory.rs +++ b/refact-agent/engine/src/agentic/compress_trajectory.rs @@ -1,95 +1,11 @@ -use crate::at_commands::at_commands::AtCommandsContext; use crate::call_validation::{ChatContent, ChatMessage}; -use crate::global_context::{try_load_caps_quickly_if_not_present, GlobalContext}; -use crate::subchat::subchat_single; +use crate::global_context::GlobalContext; +use crate::subchat::run_subchat_once; +use crate::yaml_configs::customization_registry::get_subagent_config; use std::sync::Arc; -use tokio::sync::Mutex as AMutex; use tokio::sync::RwLock as ARwLock; -use crate::caps::strip_model_from_finetune; -const COMPRESSION_MESSAGE: &str = r#"Your task is to create a detailed summary of the conversation so far, paying close attention to the user's explicit requests and your previous actions. -This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context. - -Before providing your final summary, wrap your analysis in tags to organize your thoughts and ensure you've covered all necessary points. In your analysis process: - -1. Chronologically analyze each message and section of the conversation. For each section thoroughly identify: - - The user's explicit requests and intents - - Your approach to addressing the user's requests - - Key decisions, technical concepts and code patterns - - Specific details like file names, full code snippets, function signatures, file edits, etc -2. Double-check for technical accuracy and completeness, addressing each required element thoroughly. - -Your summary should include the following sections: - -1. Primary Request and Intent: Capture all of the user's explicit requests and intents in detail -2. Key Technical Concepts: List all important technical concepts, technologies, and frameworks discussed. -3. Files and Code Sections: Enumerate specific files and code sections examined, modified, or created. Pay special attention to the most recent messages and include full code snippets where applicable and include a summary of why this file read or edit is important. -4. Problem Solving: Document problems solved and any ongoing troubleshooting efforts. -5. Pending Tasks: Outline any pending tasks that you have explicitly been asked to work on. -6. Current Work: Describe in detail precisely what was being worked on immediately before this summary request, paying special attention to the most recent messages from both user and assistant. 
Include file names and code snippets where applicable. -7. Optional Next Step: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's explicit requests, and the task you were working on immediately before this summary request. If your last task was concluded, then only list next steps if they are explicitly in line with the users request. Do not start on tangential requests without confirming with the user first. -8. If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off. This should be verbatim to ensure there's no drift in task interpretation. - -Here's an example of how your output should be structured: - - - -[Your thought process, ensuring all points are covered thoroughly and accurately] - - - -1. Primary Request and Intent: - [Detailed description] - -2. Key Technical Concepts: - - [Concept 1] - - [Concept 2] - - [...] - -3. Files and Code Sections: - - [File Name 1] - - [Summary of why this file is important] - - [Summary of the changes made to this file, if any] - - [Important Code Snippet] - - [File Name 2] - - [Important Code Snippet] - - [...] - -4. Problem Solving: - [Description of solved problems and ongoing troubleshooting]` - -5. Pending Tasks: - - [Task 1] - - [Task 2] - - [...] - -6. Current Work: - [Precise description of current work] - -7. Optional Next Step: - [Optional Next step to take] - - - - -Please provide your summary based on the conversation so far, following this structure and ensuring precision and thoroughness in your response."#; -const TEMPERATURE: f32 = 0.0; - -fn gather_used_tools(messages: &Vec) -> Vec { - let mut tools: Vec = Vec::new(); - - for message in messages { - if let Some(tool_calls) = &message.tool_calls { - for tool_call in tool_calls { - if !tools.contains(&tool_call.function.name) { - tools.push(tool_call.function.name.clone()); - } - } - } - } - - tools -} +const SUBAGENT_ID: &str = "compress_trajectory"; pub async fn compress_trajectory( gcx: Arc>, @@ -98,68 +14,34 @@ pub async fn compress_trajectory( if messages.is_empty() { return Err("The provided chat is empty".to_string()); } - let (model_id, n_ctx) = match try_load_caps_quickly_if_not_present(gcx.clone(), 0).await { - Ok(caps) => { - let model_id = caps.defaults.chat_light_model.clone(); - if let Some(model_rec) = caps.chat_models.get(&strip_model_from_finetune(&model_id)) { - Ok((model_id, model_rec.base.n_ctx)) - } else { - Err(format!( - "Model '{}' not found, server has these models: {:?}", - model_id, caps.chat_models.keys() - )) - } - }, - Err(_) => Err("No caps available".to_string()), - }?; - let mut messages_compress = messages.clone(); - messages_compress.push( - ChatMessage { - role: "user".to_string(), - content: ChatContent::SimpleText(COMPRESSION_MESSAGE.to_string()), - ..Default::default() - }, - ); - let ccx: Arc> = Arc::new(AMutex::new(AtCommandsContext::new( - gcx.clone(), - n_ctx, - 1, - false, - messages_compress.clone(), - "".to_string(), - false, - model_id.clone(), - ).await)); - let tools = gather_used_tools(&messages); - let new_messages = subchat_single( - ccx.clone(), - &model_id, - messages_compress, - Some(tools), - None, - false, - Some(TEMPERATURE), - None, - 1, - None, - true, - None, - None, - None, - ).await.map_err(|e| format!("Error: {}", e))?; - let content = new_messages - .into_iter() - .next() - .map(|x| { - 
x.into_iter().last().map(|last_m| match last_m.content { - ChatContent::SimpleText(text) => Some(text), - ChatContent::Multimodal(_) => None, - }) + let subagent_config = get_subagent_config(gcx.clone(), SUBAGENT_ID, None) + .await + .ok_or_else(|| format!("subagent config '{}' not found", SUBAGENT_ID))?; + + let compression_prompt = subagent_config.messages.user_template + .as_ref() + .ok_or_else(|| format!("messages.user_template not defined for subagent '{}'", SUBAGENT_ID))?; + + let mut messages_compress = messages.clone(); + messages_compress.push(ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText(compression_prompt.clone()), + ..Default::default() + }); + + let result = run_subchat_once(gcx, SUBAGENT_ID, messages_compress) + .await + .map_err(|e| format!("Error: {}", e))?; + + let content = result + .messages + .last() + .and_then(|last_m| match &last_m.content { + ChatContent::SimpleText(text) => Some(text.clone()), + _ => None, }) - .flatten() - .flatten() .ok_or("No traj message was generated".to_string())?; - let compressed_message = format!("{content}\n\nPlease, continue the conversation based on the provided summary"); - Ok(compressed_message) + + Ok(content) } diff --git a/refact-agent/engine/src/agentic/generate_code_edit.rs b/refact-agent/engine/src/agentic/generate_code_edit.rs index 7c2cf7ffd..de58513a7 100644 --- a/refact-agent/engine/src/agentic/generate_code_edit.rs +++ b/refact-agent/engine/src/agentic/generate_code_edit.rs @@ -1,25 +1,11 @@ -use crate::at_commands::at_commands::AtCommandsContext; use crate::call_validation::{ChatContent, ChatMessage}; -use crate::global_context::{try_load_caps_quickly_if_not_present, GlobalContext}; -use crate::subchat::subchat_single; +use crate::global_context::GlobalContext; +use crate::subchat::run_subchat_once; +use crate::yaml_configs::customization_registry::get_subagent_config; use std::sync::Arc; -use tokio::sync::Mutex as AMutex; use tokio::sync::RwLock as ARwLock; -const CODE_EDIT_SYSTEM_PROMPT: &str = r#"You are a code editing assistant. Your task is to modify the provided code according to the user's instruction. - -# Rules -1. Return ONLY the edited code - no explanations, no markdown fences, no commentary -2. Preserve the original indentation style and formatting conventions -3. Make minimal changes necessary to fulfill the instruction -4. If the instruction is unclear, make the most reasonable interpretation -5. Keep all code that isn't directly related to the instruction unchanged - -# Output Format -Return the edited code directly, without any wrapping or explanation. 
The output should be valid code that can directly replace the input."#; - -const N_CTX: usize = 32000; -const TEMPERATURE: f32 = 0.1; +const SUBAGENT_ID: &str = "code_edit"; fn remove_markdown_fences(text: &str) -> String { let trimmed = text.trim(); @@ -55,6 +41,14 @@ pub async fn generate_code_edit( return Err("The instruction is empty".to_string()); } + let subagent_config = get_subagent_config(gcx.clone(), SUBAGENT_ID, None) + .await + .ok_or_else(|| format!("subagent config '{}' not found", SUBAGENT_ID))?; + + let system_prompt = subagent_config.messages.system_prompt + .as_ref() + .ok_or_else(|| format!("messages.system_prompt not defined for subagent '{}'", SUBAGENT_ID))?; + let user_message = format!( "File: {} (line {})\n\nCode to edit:\n```\n{}\n```\n\nInstruction: {}", cursor_file, cursor_line, code, instruction @@ -63,7 +57,7 @@ pub async fn generate_code_edit( let messages = vec![ ChatMessage { role: "system".to_string(), - content: ChatContent::SimpleText(CODE_EDIT_SYSTEM_PROMPT.to_string()), + content: ChatContent::SimpleText(system_prompt.clone()), ..Default::default() }, ChatMessage { @@ -73,63 +67,19 @@ pub async fn generate_code_edit( }, ]; - let model_id = match try_load_caps_quickly_if_not_present(gcx.clone(), 0).await { - Ok(caps) => { - // Prefer light model for fast inline edits, fallback to default - let light = &caps.defaults.chat_light_model; - if !light.is_empty() { - Ok(light.clone()) - } else { - Ok(caps.defaults.chat_default_model.clone()) - } - } - Err(_) => Err("No caps available".to_string()), - }?; - - let ccx: Arc> = Arc::new(AMutex::new( - AtCommandsContext::new( - gcx.clone(), - N_CTX, - 1, - false, - messages.clone(), - "".to_string(), - false, - model_id.clone(), - ) - .await, - )); - - let new_messages = subchat_single( - ccx.clone(), - &model_id, - messages, - Some(vec![]), // No tools - pure generation - None, - false, - Some(TEMPERATURE), - None, - 1, - None, - false, // Don't prepend system prompt - we have our own - None, - None, - None, - ) - .await - .map_err(|e| format!("Error generating code edit: {}", e))?; + let result = run_subchat_once(gcx, SUBAGENT_ID, messages) + .await + .map_err(|e| format!("Error generating code edit: {}", e))?; - let edited_code = new_messages - .into_iter() - .next() - .and_then(|msgs| msgs.into_iter().last()) - .and_then(|msg| match msg.content { - ChatContent::SimpleText(text) => Some(text), - ChatContent::Multimodal(_) => None, + let edited_code = result + .messages + .last() + .and_then(|msg| match &msg.content { + ChatContent::SimpleText(text) => Some(text.clone()), + _ => None, }) .ok_or("No edited code was generated".to_string())?; - // Strip markdown fences if present Ok(remove_markdown_fences(&edited_code)) } @@ -140,7 +90,10 @@ mod tests { #[test] fn test_remove_markdown_fences_with_language() { let input = "```python\ndef hello():\n print('world')\n```"; - assert_eq!(remove_markdown_fences(input), "def hello():\n print('world')"); + assert_eq!( + remove_markdown_fences(input), + "def hello():\n print('world')" + ); } #[test] diff --git a/refact-agent/engine/src/agentic/generate_commit_message.rs b/refact-agent/engine/src/agentic/generate_commit_message.rs index cefa981ab..9f856f721 100644 --- a/refact-agent/engine/src/agentic/generate_commit_message.rs +++ b/refact-agent/engine/src/agentic/generate_commit_message.rs @@ -1,329 +1,16 @@ use std::path::PathBuf; -use crate::at_commands::at_commands::AtCommandsContext; use crate::call_validation::{ChatContent, ChatMessage}; use 
crate::files_correction::CommandSimplifiedDirExt; -use crate::global_context::{try_load_caps_quickly_if_not_present, GlobalContext}; -use crate::subchat::subchat_single; +use crate::global_context::GlobalContext; +use crate::subchat::run_subchat_once; +use crate::yaml_configs::customization_registry::get_subagent_config; use std::sync::Arc; use hashbrown::HashMap; -use tokio::sync::Mutex as AMutex; use tokio::sync::RwLock as ARwLock; use tracing::warn; use crate::files_in_workspace::detect_vcs_for_a_file_path; -const DIFF_ONLY_PROMPT: &str = r#"Generate a commit message following the Conventional Commits specification. - -# Conventional Commits Format - -``` -(): - -[optional body] - -[optional footer(s)] -``` - -## Commit Types (REQUIRED - choose exactly one) -- `feat`: New feature (correlates with MINOR in SemVer) -- `fix`: Bug fix (correlates with PATCH in SemVer) -- `refactor`: Code restructuring without changing behavior -- `perf`: Performance improvement -- `docs`: Documentation only changes -- `style`: Code style changes (formatting, whitespace, semicolons) -- `test`: Adding or correcting tests -- `build`: Changes to build system or dependencies -- `ci`: Changes to CI configuration -- `chore`: Maintenance tasks (tooling, configs, no production code change) -- `revert`: Reverting a previous commit - -## Rules - -### Subject Line (REQUIRED) -1. Format: `(): ` or `: ` -2. Use imperative mood ("add" not "added" or "adds") -3. Do NOT capitalize the first letter of description -4. Do NOT end with a period -5. Keep under 50 characters (hard limit: 72) -6. Scope is optional but recommended for larger projects - -### Body (OPTIONAL - use for complex changes) -1. Separate from subject with a blank line -2. Wrap at 72 characters -3. Explain WHAT and WHY, not HOW -4. Use bullet points for multiple items - -### Footer (OPTIONAL) -1. Reference issues: `Fixes #123`, `Closes #456`, `Refs #789` -2. Breaking changes: Start with `BREAKING CHANGE:` or add `!` after type -3. Co-authors: `Co-authored-by: Name ` - -## Breaking Changes -- Add `!` after type/scope: `feat!:` or `feat(api)!:` -- Or include `BREAKING CHANGE:` footer with explanation - -# Steps - -1. Analyze the diff to understand what changed -2. Determine the PRIMARY type of change (feat, fix, refactor, etc.) -3. Identify scope from affected files/modules (optional) -4. Write description in imperative mood explaining the intent -5. Add body only if the change is complex and needs explanation -6. Add footer for issue references or breaking changes if applicable - -# Examples - -**Input (diff)**: -```diff -- public class UserManager { -- private final UserDAO userDAO; -+ public class UserManager { -+ private final UserService userService; -+ private final NotificationService notificationService; -``` - -**Output**: -``` -refactor(user): replace UserDAO with service-based architecture - -Introduce UserService and NotificationService to improve separation of -concerns and make user management logic more reusable. 
-``` - -**Input (diff)**: -```diff -- if (age > 17) { -- accessAllowed = true; -- } else { -- accessAllowed = false; -- } -+ accessAllowed = age > 17; -``` - -**Output**: -``` -refactor: simplify age check with ternary expression -``` - -**Input (diff)**: -```diff -+ export async function fetchUserProfile(userId: string) { -+ const response = await api.get(`/users/${userId}`); -+ return response.data; -+ } -``` - -**Output**: -``` -feat(api): add user profile fetch endpoint -``` - -**Input (diff)**: -```diff -- const timeout = 5000; -+ const timeout = 30000; -``` - -**Output**: -``` -fix(database): increase query timeout to prevent failures - -Extend timeout from 5s to 30s to resolve query failures during peak load. - -Fixes #234 -``` - -**Input (breaking change)**: -```diff -- function getUser(id) { return users[id]; } -+ function getUser(id) { return { user: users[id], metadata: {} }; } -``` - -**Output**: -``` -feat(api)!: wrap user response in object with metadata - -BREAKING CHANGE: getUser() now returns { user, metadata } instead of -user directly. Update all callers to access .user property. -``` - -# Important Guidelines - -- Choose the MOST significant type if changes span multiple categories -- Be specific in the description - avoid vague terms like "update", "fix stuff" -- The subject should complete: "If applied, this commit will " -- One commit = one logical change (if diff has unrelated changes, note it) -- Scope should reflect the module, component, or area affected"#; - -const DIFF_WITH_USERS_TEXT_PROMPT: &str = r#"Generate a commit message following Conventional Commits, using the user's input as context for intent. - -# Conventional Commits Format - -``` -(): - -[optional body] - -[optional footer(s)] -``` - -## Commit Types (REQUIRED - choose exactly one) -- `feat`: New feature (correlates with MINOR in SemVer) -- `fix`: Bug fix (correlates with PATCH in SemVer) -- `refactor`: Code restructuring without changing behavior -- `perf`: Performance improvement -- `docs`: Documentation only changes -- `style`: Code style changes (formatting, whitespace, semicolons) -- `test`: Adding or correcting tests -- `build`: Changes to build system or dependencies -- `ci`: Changes to CI configuration -- `chore`: Maintenance tasks (tooling, configs, no production code change) -- `revert`: Reverting a previous commit - -## Rules - -### Subject Line (REQUIRED) -1. Format: `(): ` or `: ` -2. Use imperative mood ("add" not "added" or "adds") -3. Do NOT capitalize the first letter of description -4. Do NOT end with a period -5. Keep under 50 characters (hard limit: 72) -6. Scope is optional but recommended for larger projects - -### Body (OPTIONAL - use for complex changes) -1. Separate from subject with a blank line -2. Wrap at 72 characters -3. Explain WHAT and WHY, not HOW -4. Use bullet points for multiple items - -### Footer (OPTIONAL) -1. Reference issues: `Fixes #123`, `Closes #456`, `Refs #789` -2. Breaking changes: Start with `BREAKING CHANGE:` or add `!` after type -3. Co-authors: `Co-authored-by: Name ` - -## Breaking Changes -- Add `!` after type/scope: `feat!:` or `feat(api)!:` -- Or include `BREAKING CHANGE:` footer with explanation - -# Steps - -1. Analyze the user's initial commit message to understand their intent -2. Analyze the diff to understand the actual changes -3. Determine the correct type based on the nature of changes -4. Extract or infer a scope from user input or affected files -5. Synthesize user intent + diff analysis into a proper conventional commit -6. 
If user mentions an issue number, include it in the footer - -# Examples - -**Input (user's message)**: -``` -fix the login bug -``` - -**Input (diff)**: -```diff -- if (user.password === input) { -+ if (await bcrypt.compare(input, user.passwordHash)) { -``` - -**Output**: -``` -fix(auth): use bcrypt for secure password comparison - -Replace plaintext password comparison with bcrypt hash verification -to fix authentication vulnerability. -``` - -**Input (user's message)**: -``` -Refactor UserManager to use services instead of DAOs -``` - -**Input (diff)**: -```diff -- public class UserManager { -- private final UserDAO userDAO; -+ public class UserManager { -+ private final UserService userService; -+ private final NotificationService notificationService; -``` - -**Output**: -``` -refactor(user): replace UserDAO with service-based architecture - -Introduce UserService and NotificationService to improve separation of -concerns and make user management logic more reusable. -``` - -**Input (user's message)**: -``` -added new endpoint for users #123 -``` - -**Input (diff)**: -```diff -+ @GetMapping("/users/{id}/preferences") -+ public ResponseEntity getUserPreferences(@PathVariable Long id) { -+ return ResponseEntity.ok(userService.getPreferences(id)); -+ } -``` - -**Output**: -``` -feat(api): add user preferences endpoint - -Refs #123 -``` - -**Input (user's message)**: -``` -cleanup -``` - -**Input (diff)**: -```diff -- // TODO: implement later -- // console.log("debug"); -- const unusedVar = 42; -``` - -**Output**: -``` -chore: remove dead code and debug artifacts -``` - -**Input (user's message)**: -``` -BREAKING: change API response format -``` - -**Input (diff)**: -```diff -- return user; -+ return { data: user, version: "2.0" }; -``` - -**Output**: -``` -feat(api)!: wrap responses in versioned data envelope - -BREAKING CHANGE: All API responses now return { data, version } object -instead of raw data. Clients must access response.data for the payload. 
-``` - -# Important Guidelines - -- Preserve the user's intent but format it correctly -- If user mentions "bug", "fix", "broken" → likely `fix` -- If user mentions "add", "new", "feature" → likely `feat` -- If user mentions "refactor", "restructure", "reorganize" → `refactor` -- If user mentions "clean", "remove unused" → likely `chore` or `refactor` -- Extract issue numbers (#123) from user text and move to footer -- The subject should complete: "If applied, this commit will " -- Don't just paraphrase the user - analyze the diff to add specificity"#; -const N_CTX: usize = 32000; -const TEMPERATURE: f32 = 0.5; +const SUBAGENT_ID: &str = "commit_message"; pub fn remove_fencing(message: &String) -> Vec { let trimmed_message = message.trim(); @@ -341,7 +28,9 @@ pub fn remove_fencing(message: &String) -> Vec { if in_code_block { let part_lines: Vec<&str> = part.lines().collect(); if !part_lines.is_empty() { - let start_idx = if part_lines[0].trim().split_whitespace().count() <= 1 && part_lines.len() > 1 { + let start_idx = if part_lines[0].trim().split_whitespace().count() <= 1 + && part_lines.len() > 1 + { 1 } else { 0 @@ -383,7 +72,10 @@ mod tests { #[test] fn test_language_tag() { let input = "```rust\nfn main() {\n println!(\"Hello\");\n}\n```".to_string(); - assert_eq!(remove_fencing(&input), vec!["fn main() {\n println!(\"Hello\");\n}".to_string()]); + assert_eq!( + remove_fencing(&input), + vec!["fn main() {\n println!(\"Hello\");\n}".to_string()] + ); } #[test] @@ -395,7 +87,13 @@ mod tests { #[test] fn test_multiple_code_blocks() { let input = "First paragraph\n```\nFirst code\n```\nMiddle text\n```python\ndef hello():\n print('world')\n```\nLast paragraph".to_string(); - assert_eq!(remove_fencing(&input), vec!["First code".to_string(), "def hello():\n print('world')".to_string()]); + assert_eq!( + remove_fencing(&input), + vec![ + "First code".to_string(), + "def hello():\n print('world')".to_string() + ] + ); } #[test] @@ -413,11 +111,19 @@ pub async fn generate_commit_message_by_diff( if diff.is_empty() { return Err("The provided diff is empty".to_string()); } + + let subagent_config = get_subagent_config(gcx.clone(), SUBAGENT_ID, None) + .await + .ok_or_else(|| format!("subagent config '{}' not found", SUBAGENT_ID))?; + let messages = if let Some(text) = commit_message_prompt { + let system_prompt = subagent_config.prompts.diff_with_user_text + .as_ref() + .ok_or_else(|| format!("prompts.diff_with_user_text not defined for subagent '{}'", SUBAGENT_ID))?; vec![ ChatMessage { role: "system".to_string(), - content: ChatContent::SimpleText(DIFF_WITH_USERS_TEXT_PROMPT.to_string()), + content: ChatContent::SimpleText(system_prompt.clone()), ..Default::default() }, ChatMessage { @@ -430,10 +136,13 @@ pub async fn generate_commit_message_by_diff( }, ] } else { + let system_prompt = subagent_config.prompts.diff_only + .as_ref() + .ok_or_else(|| format!("prompts.diff_only not defined for subagent '{}'", SUBAGENT_ID))?; vec![ ChatMessage { role: "system".to_string(), - content: ChatContent::SimpleText(DIFF_ONLY_PROMPT.to_string()), + content: ChatContent::SimpleText(system_prompt.clone()), ..Default::default() }, ChatMessage { @@ -443,50 +152,17 @@ pub async fn generate_commit_message_by_diff( }, ] }; - let model_id = match try_load_caps_quickly_if_not_present(gcx.clone(), 0).await { - Ok(caps) => Ok(caps.defaults.chat_default_model.clone()), - Err(_) => Err("No caps available".to_string()), - }?; - let ccx: Arc> = Arc::new(AMutex::new(AtCommandsContext::new( - gcx.clone(), - N_CTX, - 1, - 
false, - messages.clone(), - "".to_string(), - false, - model_id.clone(), - ).await)); - let new_messages = subchat_single( - ccx.clone(), - &model_id, - messages, - Some(vec![]), - None, - false, - Some(TEMPERATURE), - None, - 1, - None, - true, - None, - None, - None, - ) + let result = run_subchat_once(gcx, SUBAGENT_ID, messages) .await .map_err(|e| format!("Error: {}", e))?; - let commit_message = new_messages - .into_iter() - .next() - .map(|x| { - x.into_iter().last().map(|last_m| match last_m.content { - ChatContent::SimpleText(text) => Some(text), - ChatContent::Multimodal(_) => None, - }) + let commit_message = result + .messages + .last() + .and_then(|last_m| match &last_m.content { + ChatContent::SimpleText(text) => Some(text.clone()), + _ => None, }) - .flatten() - .flatten() .ok_or("No commit message was generated".to_string())?; let code_blocks = remove_fencing(&commit_message); @@ -500,7 +176,14 @@ pub async fn generate_commit_message_by_diff( pub async fn _generate_commit_message_for_projects( gcx: Arc>, ) -> Result, String> { - let project_folders = gcx.read().await.documents_state.workspace_folders.lock().unwrap().clone(); + let project_folders = gcx + .read() + .await + .documents_state + .workspace_folders + .lock() + .unwrap() + .clone(); let mut commit_messages = HashMap::new(); for folder in project_folders { @@ -527,14 +210,18 @@ pub async fn _generate_commit_message_for_projects( .map_err(|e| format!("Failed to execute command for folder {folder:?}: {e}"))?; if !output.status.success() { - warn!("Command failed for folder {folder:?}: {}", String::from_utf8_lossy(&output.stderr)); + warn!( + "Command failed for folder {folder:?}: {}", + String::from_utf8_lossy(&output.stderr) + ); continue; } let diff_output = String::from_utf8_lossy(&output.stdout).to_string(); - let commit_message = generate_commit_message_by_diff(gcx.clone(), &diff_output, &None).await?; + let commit_message = + generate_commit_message_by_diff(gcx.clone(), &diff_output, &None).await?; commit_messages.insert(folder, commit_message); } Ok(commit_messages) -} \ No newline at end of file +} diff --git a/refact-agent/engine/src/agentic/generate_follow_up_message.rs b/refact-agent/engine/src/agentic/generate_follow_up_message.rs index e6faca196..a011a7d8a 100644 --- a/refact-agent/engine/src/agentic/generate_follow_up_message.rs +++ b/refact-agent/engine/src/agentic/generate_follow_up_message.rs @@ -1,38 +1,15 @@ use std::sync::Arc; use serde::Deserialize; -use tokio::sync::{RwLock as ARwLock, Mutex as AMutex}; +use tokio::sync::RwLock as ARwLock; use crate::custom_error::MapErrToString; use crate::global_context::GlobalContext; -use crate::at_commands::at_commands::AtCommandsContext; -use crate::subchat::subchat_single; +use crate::subchat::run_subchat_once; use crate::call_validation::{ChatContent, ChatMessage}; use crate::json_utils; +use crate::yaml_configs::customization_registry::get_subagent_config; -const PROMPT: &str = r#" -Your task is to do two things for a conversation between a user and an assistant: - -1. **Follow-Up Messages:** - - Create up to 3 follow-up messages that the user might send after the assistant's last message. - - Maximum 3 words each, preferably 1 or 2 words. - - Each message should have a different meaning. - - If the assistant's last message contains a question, generate different replies that address that question. - - If there is no clear follow-up, return an empty list. - - If assistant's work looks completed, return an empty list. 
- - If there is nothing but garbage in the text you see, return an empty list. - - If not sure, return an empty list. - -2. **Topic Change Detection:** - - Decide if the user's latest message is about a different topic or a different project or a different problem from the previous conversation. - - A topic change means the new topic is not related to the previous discussion. - -Return the result in this JSON format (without extra formatting): - -{ - "follow_ups": ["Follow-up 1", "Follow-up 2", "Follow-up 3", "Follow-up 4", "Follow-up 5"], - "topic_changed": true -} -"#; +const SUBAGENT_ID: &str = "follow_up"; #[derive(Deserialize, Clone)] pub struct FollowUpResponse { @@ -40,33 +17,29 @@ pub struct FollowUpResponse { pub topic_changed: bool, } -fn _make_conversation( - messages: &Vec -) -> Vec { +fn _make_conversation(messages: &Vec, system_prompt: &str) -> Vec { let mut history_message = "*Conversation:*\n".to_string(); for m in messages.iter().rev().take(2) { - let content = m.content.content_text_only(); - let limited_content = if content.chars().count() > 5000 { - let skip_count = content.chars().count() - 5000; - format!("...{}", content.chars().skip(skip_count).collect::()) + let content = m.content.to_text_with_image_placeholders(); + let char_count = content.chars().count(); + let limited_content = if char_count > 5000 { + let skip_count = char_count - 5000; + format!( + "...{}", + content.chars().skip(skip_count).collect::() + ) } else { content }; let message_row = match m.role.as_str() { - "user" => { - format!("👤:{}\n\n", limited_content) - } - "assistant" => { - format!("🤖:{}\n\n", limited_content) - } - _ => { - continue; - } + "user" => format!("👤:{}\n\n", limited_content), + "assistant" => format!("🤖:{}\n\n", limited_content), + _ => continue, }; history_message.insert_str(0, &message_row); } vec![ - ChatMessage::new("system".to_string(), PROMPT.to_string()), + ChatMessage::new("system".to_string(), system_prompt.to_string()), ChatMessage::new("user".to_string(), history_message), ] } @@ -74,51 +47,31 @@ fn _make_conversation( pub async fn generate_follow_up_message( messages: Vec, gcx: Arc>, - model_id: &str, - chat_id: &str, + _model_id: &str, + _chat_id: &str, ) -> Result { - let ccx = Arc::new(AMutex::new(AtCommandsContext::new( - gcx.clone(), - 32000, - 1, - false, - messages.clone(), - chat_id.to_string(), - false, - model_id.to_string(), - ).await)); - let updated_messages: Vec> = subchat_single( - ccx.clone(), - model_id, - _make_conversation(&messages), - Some(vec![]), - None, - false, - Some(0.0), - None, - 1, - None, - true, - None, - None, - None, - ).await?; - let response = updated_messages - .into_iter() - .next() - .map(|x| { - x.into_iter().last().map(|last_m| match last_m.content { - ChatContent::SimpleText(text) => Some(text), - ChatContent::Multimodal(_) => None, - }) + let subagent_config = get_subagent_config(gcx.clone(), SUBAGENT_ID, None) + .await + .ok_or_else(|| format!("subagent config '{}' not found", SUBAGENT_ID))?; + + let system_prompt = subagent_config.messages.system_prompt + .as_ref() + .ok_or_else(|| format!("messages.system_prompt not defined for subagent '{}'", SUBAGENT_ID))?; + + let result = run_subchat_once(gcx, SUBAGENT_ID, _make_conversation(&messages, system_prompt)).await?; + + let response = result + .messages + .last() + .and_then(|last_m| match &last_m.content { + ChatContent::SimpleText(text) => Some(text.clone()), + _ => None, }) - .flatten() - .flatten() .ok_or("No follow-up message was generated".to_string())?; 
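+    // The "follow_up" subagent prompt (now defined in the customization YAML rather than
+    // inline here) is expected to make the model answer with a bare JSON object such as
+    // {"follow_ups": ["...", "..."], "topic_changed": true}, matching FollowUpResponse;
+    // that object is parsed out of the reply by extract_json_object below.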
tracing::info!("follow-up model says {:?}", response); - let response: FollowUpResponse = json_utils::extract_json_object(&response) - .map_err_with_prefix("Failed to parse json:")?; + let response: FollowUpResponse = + json_utils::extract_json_object(&response).map_err_with_prefix("Failed to parse json:")?; Ok(response) } diff --git a/refact-agent/engine/src/agentic/mod.rs b/refact-agent/engine/src/agentic/mod.rs index 6c3f144b9..712c8d48e 100644 --- a/refact-agent/engine/src/agentic/mod.rs +++ b/refact-agent/engine/src/agentic/mod.rs @@ -1,4 +1,5 @@ +pub mod compress_trajectory; +pub mod generate_code_edit; pub mod generate_commit_message; pub mod generate_follow_up_message; -pub mod compress_trajectory; -pub mod generate_code_edit; \ No newline at end of file +pub mod mode_transition; diff --git a/refact-agent/engine/src/agentic/mode_transition.rs b/refact-agent/engine/src/agentic/mode_transition.rs new file mode 100644 index 000000000..b158233a6 --- /dev/null +++ b/refact-agent/engine/src/agentic/mode_transition.rs @@ -0,0 +1,1218 @@ +use std::collections::HashSet; +use std::path::PathBuf; +use std::sync::Arc; +use lazy_static::lazy_static; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use tokio::sync::RwLock as ARwLock; + +use crate::call_validation::{ChatContent, ChatMessage, ContextFile}; +use crate::global_context::GlobalContext; +use crate::subchat::run_subchat_once; +use crate::yaml_configs::customization_registry::get_subagent_config; + +const SUBAGENT_ID: &str = "mode_transition"; +const MAX_FILE_SIZE: usize = 1024 * 1024; // 1MB max file size + +lazy_static! { + static ref MEMORY_PATH_REGEX: Regex = Regex::new( + r"(?:^|[\s\n])(/[^\s]+\.refact/(?:knowledge|trajectories|tasks/[^/]+/memories)/[^\s\n,)]+\.(?:md|json))" + ).expect("Invalid memory path regex"); + + static ref FILE_PATH_REGEX: Regex = Regex::new( + r"(?m)^\s*(?:File|Path):\s*(\S+)" + ).expect("Invalid file path regex"); + + static ref DIFF_GIT_REGEX: Regex = Regex::new( + r"(?m)^(?:diff --git [ab]/(\S+)|[+]{3} [ab]/(\S+))" + ).expect("Invalid diff git regex"); +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FileReference { + pub path: String, + pub source: String, + pub msg_id: String, +} + +#[derive(Debug, Clone, Default)] +pub struct ConversationMetadata { + pub annotated_messages: Vec<(String, ChatMessage)>, + pub context_files: Vec, + pub edited_files: Vec, + pub memory_paths: Vec, +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct ParsedDecisions { + pub summary: String, + pub files_to_open: Vec, + pub messages_to_preserve: Vec, + pub memories_to_include: Vec, + pub tool_outputs_to_include: Vec, + pub pending_tasks: Vec, + pub handoff_message: String, +} + + + +pub fn extract_conversation_metadata(messages: &[ChatMessage]) -> ConversationMetadata { + let mut metadata = ConversationMetadata::default(); + let mut seen_files: HashSet = HashSet::new(); + let mut seen_memories: HashSet = HashSet::new(); + + for (idx, msg) in messages.iter().enumerate() { + let msg_id = format!("MSG_ID:{}", idx); + metadata.annotated_messages.push((msg_id.clone(), msg.clone())); + + if msg.role == "context_file" { + match &msg.content { + ChatContent::ContextFiles(files) => { + for file in files { + if seen_files.insert(file.file_name.clone()) { + metadata.context_files.push(FileReference { + path: file.file_name.clone(), + source: "context_file".to_string(), + msg_id: msg_id.clone(), + }); + } + } + } + ChatContent::SimpleText(text) => { + if let Ok(files) = 
serde_json::from_str::>(text) { + for file in files { + if seen_files.insert(file.file_name.clone()) { + metadata.context_files.push(FileReference { + path: file.file_name.clone(), + source: "context_file".to_string(), + msg_id: msg_id.clone(), + }); + } + } + } + } + _ => {} + } + } + + if msg.role == "diff" || (msg.role == "tool" && is_diff_content(&msg.content)) { + if let ChatContent::SimpleText(text) = &msg.content { + for cap in FILE_PATH_REGEX.captures_iter(text) { + if let Some(path) = cap.get(1) { + let path_str = clean_path_string(path.as_str()); + if !path_str.is_empty() && seen_files.insert(path_str.clone()) { + metadata.edited_files.push(FileReference { + path: path_str, + source: "diff".to_string(), + msg_id: msg_id.clone(), + }); + } + } + } + for cap in DIFF_GIT_REGEX.captures_iter(text) { + let path_str = cap.get(1).or_else(|| cap.get(2)) + .map(|m| clean_path_string(m.as_str())) + .unwrap_or_default(); + if !path_str.is_empty() && seen_files.insert(path_str.clone()) { + metadata.edited_files.push(FileReference { + path: path_str, + source: "diff".to_string(), + msg_id: msg_id.clone(), + }); + } + } + } + } + + if msg.role == "tool" { + if let ChatContent::SimpleText(text) = &msg.content { + for cap in MEMORY_PATH_REGEX.captures_iter(text) { + if let Some(path) = cap.get(1) { + let path_str = clean_path_string(path.as_str()); + if !path_str.is_empty() && seen_memories.insert(path_str.clone()) { + metadata.memory_paths.push(path_str); + } + } + } + } + } + } + + metadata +} + +fn clean_path_string(s: &str) -> String { + s.trim_end_matches(|c| c == ')' || c == ',' || c == ';' || c == ':' || c == '"' || c == '\'') + .to_string() +} + +fn is_diff_content(content: &ChatContent) -> bool { + match content { + ChatContent::SimpleText(text) => { + text.contains("+++") && text.contains("---") || + text.contains("@@ ") || + text.starts_with("diff ") + } + _ => false, + } +} + +fn parse_xml_tag(content: &str, tag: &str) -> Option { + let open = format!("<{}>", tag); + let close = format!("", tag); + + let start = content.find(&open)?; + let after_open = start + open.len(); + let end = content[after_open..].find(&close)? 
+ after_open; + + if end > after_open { + Some(content[after_open..end].trim().to_string()) + } else { + None + } +} + +fn normalize_list_item(item: &str) -> String { + let mut s = item.trim(); + if s.starts_with('-') || s.starts_with('*') || s.starts_with('+') { + s = s[1..].trim_start(); + } else if let Some(rest) = s.strip_prefix(|c: char| c.is_ascii_digit()) { + let rest = rest.trim_start_matches(|c: char| c.is_ascii_digit()); + if let Some(after) = rest.strip_prefix('.').or_else(|| rest.strip_prefix(')')) { + s = after.trim_start(); + } + } + let s = s.trim_matches('`').trim_matches('"').trim_matches('\'').trim(); + s.to_string() +} + +fn parse_list_tag(content: &str, tag: &str) -> Vec { + parse_xml_tag(content, tag) + .map(|s| { + s.lines() + .map(|l| normalize_list_item(l)) + .filter(|l| !l.is_empty()) + .collect() + }) + .unwrap_or_default() +} + +pub fn parse_llm_response(response: &str) -> ParsedDecisions { + ParsedDecisions { + summary: parse_xml_tag(response, "summary").unwrap_or_default(), + files_to_open: parse_list_tag(response, "files_to_open"), + messages_to_preserve: parse_list_tag(response, "messages_to_preserve"), + memories_to_include: parse_list_tag(response, "memories_to_include"), + tool_outputs_to_include: parse_list_tag(response, "tool_outputs_to_include"), + pending_tasks: parse_list_tag(response, "pending_tasks"), + handoff_message: parse_xml_tag(response, "handoff_message").unwrap_or_default(), + } +} + +fn format_annotated_messages(metadata: &ConversationMetadata) -> String { + let mut result = String::new(); + + for (msg_id, msg) in &metadata.annotated_messages { + let role = &msg.role; + let content_preview = match &msg.content { + ChatContent::SimpleText(text) => { + truncate_utf8(text, 500) + } + ChatContent::ContextFiles(files) => { + format!("[Context files: {}]", files.iter().map(|f| f.file_name.as_str()).collect::>().join(", ")) + } + ChatContent::Multimodal(elements) => { + let text_parts: Vec = elements.iter() + .filter(|el| el.is_text()) + .map(|el| truncate_utf8(&el.m_content, 200)) + .collect(); + let image_count = elements.iter().filter(|el| el.is_image()).count(); + let text_preview = if text_parts.is_empty() { + String::new() + } else { + text_parts.join(" ") + }; + if image_count > 0 { + format!("{} [contains {} image(s)]", text_preview, image_count) + } else { + text_preview + } + } + }; + + let tool_info = if let Some(tool_calls) = &msg.tool_calls { + if !tool_calls.is_empty() { + let tools: Vec = tool_calls.iter() + .map(|tc| format!("{}({})", tc.function.name, truncate_utf8(&tc.function.arguments, 100))) + .collect(); + format!("\n[tool_calls: {}]", tools.join(", ")) + } else { + String::new() + } + } else { + String::new() + }; + + result.push_str(&format!("[{}] [{}]\n{}{}\n\n", msg_id, role, content_preview, tool_info)); + } + + result +} + +fn format_file_list(metadata: &ConversationMetadata) -> String { + let mut lines = Vec::new(); + + for file_ref in &metadata.context_files { + lines.push(format!("- {} (from {}, {})", file_ref.path, file_ref.source, file_ref.msg_id)); + } + + for file_ref in &metadata.edited_files { + lines.push(format!("- {} (edited, from {}, {})", file_ref.path, file_ref.source, file_ref.msg_id)); + } + + if lines.is_empty() { + "No files found in conversation".to_string() + } else { + lines.join("\n") + } +} + +fn format_memory_list(metadata: &ConversationMetadata) -> String { + if metadata.memory_paths.is_empty() { + "No memory/knowledge files found".to_string() + } else { + metadata.memory_paths.iter() + 
.map(|p| format!("- {}", p))
+            .collect::<Vec<String>>()
+            .join("\n")
+    }
+}
+
+fn truncate_utf8(s: &str, max_chars: usize) -> String {
+    let char_count = s.chars().count();
+    if char_count <= max_chars {
+        s.to_string()
+    } else {
+        let truncated: String = s.chars().take(max_chars).collect();
+        format!("{}...", truncated)
+    }
+}
+
+pub async fn analyze_mode_transition(
+    gcx: Arc<AMutex<GlobalContext>>,
+    messages: &[ChatMessage],
+    target_mode: &str,
+    target_mode_description: &str,
+) -> Result<ParsedDecisions, String> {
+    if messages.is_empty() {
+        return Err("The provided chat is empty".to_string());
+    }
+
+    let subagent_config = get_subagent_config(gcx.clone(), SUBAGENT_ID, None)
+        .await
+        .ok_or_else(|| format!("subagent config '{}' not found", SUBAGENT_ID))?;
+
+    let user_template = subagent_config.messages.user_template
+        .as_ref()
+        .ok_or_else(|| format!("messages.user_template not defined for subagent '{}'", SUBAGENT_ID))?;
+
+    let metadata = extract_conversation_metadata(messages);
+
+    let annotated_message_list = format_annotated_messages(&metadata);
+    let file_list = format_file_list(&metadata);
+    let memory_list = format_memory_list(&metadata);
+
+    let user_prompt = user_template
+        .replace("{target_mode}", target_mode)
+        .replace("{target_mode_description}", target_mode_description)
+        .replace("{annotated_message_list}", &annotated_message_list)
+        .replace("{file_list}", &file_list)
+        .replace("{memory_list}", &memory_list);
+
+    let analysis_messages = vec![
+        ChatMessage {
+            role: "user".to_string(),
+            content: ChatContent::SimpleText(user_prompt),
+            ..Default::default()
+        },
+    ];
+
+    let result = run_subchat_once(gcx, SUBAGENT_ID, analysis_messages)
+        .await
+        .map_err(|e| format!("Error analyzing mode transition: {}", e))?;
+
+    let response_text = result
+        .messages
+        .last()
+        .and_then(|msg| match &msg.content {
+            ChatContent::SimpleText(text) => Some(text.clone()),
+            _ => None,
+        })
+        .ok_or("No analysis response was generated".to_string())?;
+
+    Ok(parse_llm_response(&response_text))
+}
+
+fn find_task_done_report(messages: &[ChatMessage]) -> Option<String> {
+    let mut task_done_call_id: Option<String> = None;
+    for msg in messages.iter().rev() {
+        if msg.role == "assistant" {
+            if let Some(tool_calls) = &msg.tool_calls {
+                for tc in tool_calls {
+                    if tc.function.name == "task_done" {
+                        task_done_call_id = Some(tc.id.clone());
+                        break;
+                    }
+                }
+            }
+            if task_done_call_id.is_some() {
+                break;
+            }
+        }
+    }
+
+    let call_id = task_done_call_id?;
+
+    for msg in messages.iter().rev() {
+        if msg.role == "tool" && msg.tool_call_id == call_id {
+            if let ChatContent::SimpleText(text) = &msg.content {
+                if let Ok(obj) = serde_json::from_str::<serde_json::Value>(text) {
+                    let summary = obj.get("summary").and_then(|v| v.as_str()).unwrap_or("");
+                    let report = obj.get("report").and_then(|v| v.as_str()).unwrap_or("");
+                    let files_changed: Vec<&str> = obj.get("files_changed")
+                        .and_then(|v| v.as_array())
+                        .map(|arr| arr.iter().filter_map(|v| v.as_str()).collect())
+                        .unwrap_or_default();
+
+                    let mut result = String::new();
+                    if !summary.is_empty() {
+                        result.push_str(&format!("**{}**\n\n", summary));
+                    }
+                    if !report.is_empty() {
+                        result.push_str(report);
+                    }
+                    if !files_changed.is_empty() {
+                        result.push_str("\n\n**Files changed:**\n");
+                        for f in &files_changed {
+                            result.push_str(&format!("- `{}`\n", f));
+                        }
+                    }
+                    if !result.is_empty() {
+                        return Some(result);
+                    }
+                }
+            }
+        }
+    }
+
+    None
+}
+
+fn resolve_tool_name_for_output(metadata: &ConversationMetadata, tool_call_id: &str) -> String {
+    if tool_call_id.is_empty() {
+        return "tool".to_string();
+    }
+    for (_, msg) in &metadata.annotated_messages {
+        if msg.role == "assistant" {
+            if let Some(tool_calls) = &msg.tool_calls {
+                for tc in tool_calls {
+                    if tc.id == tool_call_id {
+                        return tc.function.name.clone();
+                    }
+                }
+            }
+        }
+    }
+    "tool".to_string()
+}
+
+fn format_conversation_entry(msg: &ChatMessage, metadata: &ConversationMetadata) -> String {
+    match msg.role.as_str() {
+        "user" => {
+            let text = extract_text_content(&msg.content);
+            if text.trim().is_empty() {
+                return String::new();
+            }
+            format!("### 👤 User\n\n{}", text.trim())
+        }
+        "assistant" => {
+            let text = extract_text_content(&msg.content);
+            let tool_calls_md = if let Some(tool_calls) = &msg.tool_calls {
+                if !tool_calls.is_empty() {
+                    let calls: Vec<String> = tool_calls.iter()
+                        .map(|tc| {
+                            let args_preview = truncate_utf8(&tc.function.arguments, 120);
+                            format!("- `{}({})`", tc.function.name, args_preview)
+                        })
+                        .collect();
+                    format!("\n\n**Tool calls:**\n{}", calls.join("\n"))
+                } else {
+                    String::new()
+                }
+            } else {
+                String::new()
+            };
+            if text.trim().is_empty() && tool_calls_md.is_empty() {
+                return String::new();
+            }
+            let mut result = "### 🤖 Assistant\n\n".to_string();
+            if !text.trim().is_empty() {
+                result.push_str(text.trim());
+            }
+            result.push_str(&tool_calls_md);
+            result
+        }
+        "tool" => {
+            let text = extract_text_content(&msg.content);
+            if text.trim().is_empty() {
+                return String::new();
+            }
+            let tool_name = resolve_tool_name_for_output(metadata, &msg.tool_call_id);
+            let truncated = truncate_utf8(text.trim(), 10000);
+            format!("### 🔧 Tool: `{}`\n\n```\n{}\n```", tool_name, truncated)
+        }
+        "system" => {
+            let text = extract_text_content(&msg.content);
+            if text.trim().is_empty() {
+                return String::new();
+            }
+            format!("### ⚙️ System\n\n{}", text.trim())
+        }
+        _ => String::new(),
+    }
+}
+
+fn extract_text_content(content: &ChatContent) -> String {
+    match content {
+        ChatContent::SimpleText(text) => text.clone(),
+        ChatContent::Multimodal(elements) => {
+            elements.iter()
+                .filter_map(|el| {
+                    if el.is_text() { Some(el.m_content.clone()) } else { None }
+                })
+                .collect::<Vec<String>>()
+                .join("\n")
+        }
+        ChatContent::ContextFiles(_) => String::new(),
+    }
+}
+
+pub async fn assemble_new_chat(
+    gcx: Arc<AMutex<GlobalContext>>,
+    original_messages: &[ChatMessage],
+    decisions: &ParsedDecisions,
+) -> Result<Vec<ChatMessage>, String> {
+    let metadata = extract_conversation_metadata(original_messages);
+    let mut new_messages: Vec<ChatMessage> = Vec::new();
+    let workspace_dirs = crate::files_correction::get_project_dirs(gcx.clone()).await;
+
+    let allowed_files: HashSet<&str> = metadata.context_files.iter()
+        .map(|f| f.path.as_str())
+        .chain(metadata.edited_files.iter().map(|f| f.path.as_str()))
+        .collect();
+    let allowed_memories: HashSet<&str> = metadata.memory_paths.iter()
+        .map(|s| s.as_str())
+        .collect();
+
+    // 1.
All files batched in a single context_file message + let mut file_contents: Vec = Vec::new(); + for path in &decisions.files_to_open { + if !allowed_files.contains(path.as_str()) { + tracing::warn!("Skipping file {} - not in conversation allowlist", path); + continue; + } + match read_file_content_safe(gcx.clone(), path, &workspace_dirs).await { + Ok(content) => { + file_contents.push(ContextFile { + file_name: path.clone(), + file_content: content.clone(), + line1: 1, + line2: content.lines().count(), + ..Default::default() + }); + } + Err(e) => { + tracing::warn!("Failed to read file {}: {}", path, e); + } + } + } + if !file_contents.is_empty() { + new_messages.push(ChatMessage { + role: "context_file".to_string(), + content: ChatContent::ContextFiles(file_contents), + ..Default::default() + }); + } + + // 2. All memories batched in a single context_file message + let mut memory_contents: Vec = Vec::new(); + for memory_path in &decisions.memories_to_include { + if !allowed_memories.contains(memory_path.as_str()) { + tracing::warn!("Skipping memory {} - not in conversation allowlist", memory_path); + continue; + } + match read_file_content_safe(gcx.clone(), memory_path, &workspace_dirs).await { + Ok(content) => { + memory_contents.push(ContextFile { + file_name: memory_path.clone(), + file_content: content.clone(), + line1: 1, + line2: content.lines().count(), + ..Default::default() + }); + } + Err(e) => { + tracing::warn!("Failed to read memory {}: {}", memory_path, e); + } + } + } + if !memory_contents.is_empty() { + new_messages.push(ChatMessage { + role: "context_file".to_string(), + content: ChatContent::ContextFiles(memory_contents), + ..Default::default() + }); + } + + // 3. "Previous Conversation" message: preserved messages + tool outputs interleaved in order, summary at end + let mut preserved_indices: HashSet = decisions.messages_to_preserve + .iter() + .filter_map(|msg_id_ref| { + let id = msg_id_ref.trim_start_matches("MSG_ID:"); + id.parse::().ok() + }) + .collect(); + let tool_output_indices: HashSet = decisions.tool_outputs_to_include + .iter() + .filter_map(|msg_id_ref| { + let id = msg_id_ref.trim_start_matches("MSG_ID:"); + id.parse::().ok() + }) + .collect(); + preserved_indices.extend(&tool_output_indices); + + let mut all_indices: Vec = preserved_indices.into_iter().collect(); + all_indices.sort(); + all_indices.dedup(); + + let mut conversation_parts: Vec = Vec::new(); + let mut preserved_images: Vec = Vec::new(); + for idx in &all_indices { + if let Some((_, msg)) = metadata.annotated_messages.get(*idx) { + let formatted = format_conversation_entry(msg, &metadata); + if !formatted.is_empty() { + conversation_parts.push(formatted); + } + if let ChatContent::Multimodal(elements) = &msg.content { + for el in elements { + if el.is_image() { + preserved_images.push(el.clone()); + } + } + } + } + } + + let has_conversation = !conversation_parts.is_empty() || !decisions.summary.is_empty(); + if has_conversation { + let mut conversation_text = String::new(); + if !conversation_parts.is_empty() { + conversation_text.push_str("## Previous Conversation\n\n"); + conversation_text.push_str(&conversation_parts.join("\n\n---\n\n")); + } + if !decisions.summary.is_empty() { + if !conversation_text.is_empty() { + conversation_text.push_str("\n\n---\n\n"); + } + conversation_text.push_str(&format!("## Summary\n\n{}", decisions.summary)); + } + + if preserved_images.is_empty() { + new_messages.push(ChatMessage { + role: "user".to_string(), + content: 
ChatContent::SimpleText(conversation_text), + ..Default::default() + }); + } else { + match crate::scratchpads::multimodality::MultimodalElement::new("text".to_string(), conversation_text.clone()) { + Ok(text_element) => { + let mut elements = vec![text_element]; + elements.extend(preserved_images); + new_messages.push(ChatMessage { + role: "user".to_string(), + content: ChatContent::Multimodal(elements), + ..Default::default() + }); + } + Err(_) => { + new_messages.push(ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText(conversation_text), + ..Default::default() + }); + } + } + } + } + + // 4. Task done report as a separate message (if present) + let task_done_report = find_task_done_report(original_messages); + if let Some(report) = &task_done_report { + new_messages.push(ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText(format!("## Task Completion Report\n\n{}", report)), + ..Default::default() + }); + } + + // 5. Handoff message (with pending tasks only if no task_done report) + let mut handoff_text = String::new(); + if task_done_report.is_none() && !decisions.pending_tasks.is_empty() { + let tasks = decisions.pending_tasks.iter() + .map(|t| format!("- {}", t)) + .collect::>() + .join("\n"); + handoff_text.push_str(&format!("## Pending Tasks\n\n{}\n\n---\n\n", tasks)); + } + if !decisions.handoff_message.is_empty() { + handoff_text.push_str(&decisions.handoff_message); + } + if !handoff_text.is_empty() { + new_messages.push(ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText(handoff_text), + ..Default::default() + }); + } + + Ok(new_messages) +} + + + +async fn read_file_content_safe( + _gcx: Arc>, + path: &str, + workspace_dirs: &[PathBuf], +) -> Result { + let full_path = if std::path::Path::new(path).is_absolute() { + PathBuf::from(path) + } else if let Some(workspace) = workspace_dirs.first() { + workspace.join(path) + } else { + return Err("No workspace directory available".to_string()); + }; + + let canonical_path = full_path.canonicalize() + .map_err(|e| format!("Failed to canonicalize path {}: {}", full_path.display(), e))?; + + let is_in_workspace = workspace_dirs.iter().any(|ws| { + if let Ok(canonical_ws) = ws.canonicalize() { + canonical_path.starts_with(&canonical_ws) + } else { + false + } + }); + + let is_refact_path = canonical_path.to_string_lossy().contains(".refact/"); + + if !is_in_workspace && !is_refact_path { + return Err(format!( + "Path {} is outside allowed directories", + canonical_path.display() + )); + } + + let metadata = tokio::fs::metadata(&canonical_path) + .await + .map_err(|e| format!("Failed to get metadata for {}: {}", canonical_path.display(), e))?; + + if metadata.len() > MAX_FILE_SIZE as u64 { + return Err(format!( + "File {} is too large ({} bytes, max {} bytes)", + canonical_path.display(), + metadata.len(), + MAX_FILE_SIZE + )); + } + + tokio::fs::read_to_string(&canonical_path) + .await + .map_err(|e| format!("Failed to read file {}: {}", canonical_path.display(), e)) +} + + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_xml_tag() { + let content = r#" + +This is a test summary. +Multiple lines. 
+ +"#; + let result = parse_xml_tag(content, "summary"); + assert!(result.is_some()); + assert!(result.unwrap().contains("This is a test summary")); + } + + #[test] + fn test_parse_xml_tag_missing() { + let content = "No tags here"; + let result = parse_xml_tag(content, "summary"); + assert!(result.is_none()); + } + + #[test] + fn test_parse_list_tag() { + let content = r#" + +/src/main.rs +/src/config.rs +/src/lib.rs + +"#; + let result = parse_list_tag(content, "files_to_open"); + assert_eq!(result.len(), 3); + assert_eq!(result[0], "/src/main.rs"); + assert_eq!(result[1], "/src/config.rs"); + assert_eq!(result[2], "/src/lib.rs"); + } + + #[test] + fn test_parse_list_tag_empty() { + let content = r#" + + +"#; + let result = parse_list_tag(content, "files_to_open"); + assert!(result.is_empty()); + } + + #[test] + fn test_parse_llm_response_complete() { + let response = r#" + +Building JWT auth system for Axum API. +Token generation complete. + + + +/src/auth.rs +/src/config.rs + + + +MSG_ID:1 +MSG_ID:8 + + + +/project/.refact/knowledge/jwt-design.md + + + +MSG_ID:7 +MSG_ID:15 + + + +Implement refresh tokens +Add rate limiting + + + +Continue with refresh token implementation. + +"#; + let decisions = parse_llm_response(response); + + assert!(decisions.summary.contains("JWT auth system")); + assert_eq!(decisions.files_to_open.len(), 2); + assert_eq!(decisions.messages_to_preserve.len(), 2); + assert_eq!(decisions.memories_to_include.len(), 1); + assert_eq!(decisions.tool_outputs_to_include.len(), 2); + assert_eq!(decisions.tool_outputs_to_include[0], "MSG_ID:7"); + assert_eq!(decisions.pending_tasks.len(), 2); + assert!(decisions.handoff_message.contains("refresh token")); + } + + #[test] + fn test_extract_conversation_metadata_basic() { + let messages = vec![ + ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText("Hello".to_string()), + ..Default::default() + }, + ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("Hi there".to_string()), + ..Default::default() + }, + ]; + + let metadata = extract_conversation_metadata(&messages); + assert_eq!(metadata.annotated_messages.len(), 2); + assert_eq!(metadata.annotated_messages[0].0, "MSG_ID:0"); + assert_eq!(metadata.annotated_messages[1].0, "MSG_ID:1"); + } + + #[test] + fn test_extract_conversation_metadata_with_context_files() { + let messages = vec![ + ChatMessage { + role: "context_file".to_string(), + content: ChatContent::ContextFiles(vec![ + ContextFile { + file_name: "/src/main.rs".to_string(), + file_content: "fn main() {}".to_string(), + line1: 1, + line2: 1, + ..Default::default() + }, + ]), + ..Default::default() + }, + ]; + + let metadata = extract_conversation_metadata(&messages); + assert_eq!(metadata.context_files.len(), 1); + assert_eq!(metadata.context_files[0].path, "/src/main.rs"); + } + + #[test] + fn test_is_diff_content() { + let diff_content = ChatContent::SimpleText( + "--- a/file.rs\n+++ b/file.rs\n@@ -1,3 +1,4 @@".to_string() + ); + assert!(is_diff_content(&diff_content)); + + let non_diff = ChatContent::SimpleText("Just some text".to_string()); + assert!(!is_diff_content(&non_diff)); + } + + #[test] + fn test_truncate_utf8_ascii() { + let text = "Hello, World!"; + assert_eq!(truncate_utf8(text, 5), "Hello..."); + assert_eq!(truncate_utf8(text, 100), "Hello, World!"); + } + + #[test] + fn test_truncate_utf8_unicode() { + let text = "Hello 👋 World 🌍!"; + let result = truncate_utf8(text, 8); + assert!(result.ends_with("...")); + for i in 0..20 { + let _ = 
truncate_utf8(text, i); + } + } + + #[test] + fn test_truncate_utf8_cyrillic() { + let text = "Привет мир"; + let result = truncate_utf8(text, 6); + assert_eq!(result, "Привет..."); + } + + #[test] + fn test_parse_xml_tag_close_before_open() { + let content = "Some text with and then actual content"; + let result = parse_xml_tag(content, "summary"); + assert!(result.is_some()); + assert_eq!(result.unwrap(), "actual content"); + } + + #[test] + fn test_parse_xml_tag_multiple_tags() { + let content = r#" +First summary +Some text +Second summary +"#; + let result = parse_xml_tag(content, "summary"); + assert!(result.is_some()); + assert_eq!(result.unwrap(), "First summary"); + } + + #[test] + fn test_parse_xml_tag_missing_close() { + let content = "Content without close tag"; + let result = parse_xml_tag(content, "summary"); + assert!(result.is_none()); + } + + #[test] + fn test_memory_path_extraction_tasks() { + let tool_output = r#" +Memory saved successfully. +File: /project/.refact/tasks/task-123/memories/2024-01-15_abc123_jwt-decision.md +Task: task-123 +"#; + let messages = vec![ + ChatMessage { + role: "tool".to_string(), + content: ChatContent::SimpleText(tool_output.to_string()), + ..Default::default() + }, + ]; + + let metadata = extract_conversation_metadata(&messages); + assert_eq!(metadata.memory_paths.len(), 1); + assert!(metadata.memory_paths[0].contains(".refact/tasks/")); + assert!(metadata.memory_paths[0].contains("/memories/")); + } + + #[test] + fn test_memory_path_extraction_knowledge() { + let tool_output = "Loaded: /home/user/project/.refact/knowledge/2024-01-15_design.md"; + let messages = vec![ + ChatMessage { + role: "tool".to_string(), + content: ChatContent::SimpleText(tool_output.to_string()), + ..Default::default() + }, + ]; + + let metadata = extract_conversation_metadata(&messages); + assert_eq!(metadata.memory_paths.len(), 1); + assert!(metadata.memory_paths[0].contains(".refact/knowledge/")); + } + + #[test] + fn test_diff_git_extraction() { + let diff_content = r#" +diff --git a/src/auth.rs b/src/auth.rs +index 1234567..abcdefg 100644 +--- a/src/auth.rs ++++ b/src/auth.rs +@@ -1,3 +1,4 @@ ++use jwt::Token; +"#; + let messages = vec![ + ChatMessage { + role: "tool".to_string(), + content: ChatContent::SimpleText(diff_content.to_string()), + ..Default::default() + }, + ]; + + let metadata = extract_conversation_metadata(&messages); + assert!(!metadata.edited_files.is_empty()); + assert!(metadata.edited_files.iter().any(|f| f.path.contains("auth.rs"))); + } + + #[test] + fn test_clean_path_string() { + assert_eq!(clean_path_string("/path/to/file.rs"), "/path/to/file.rs"); + assert_eq!(clean_path_string("/path/to/file.rs)"), "/path/to/file.rs"); + assert_eq!(clean_path_string("/path/to/file.rs,"), "/path/to/file.rs"); + assert_eq!(clean_path_string("/path/to/file.rs\""), "/path/to/file.rs"); + } + + #[test] + fn test_normalize_list_item_bullets() { + assert_eq!(normalize_list_item("- /src/main.rs"), "/src/main.rs"); + assert_eq!(normalize_list_item("* /src/main.rs"), "/src/main.rs"); + assert_eq!(normalize_list_item("+ /src/main.rs"), "/src/main.rs"); + assert_eq!(normalize_list_item(" - /src/main.rs"), "/src/main.rs"); + } + + #[test] + fn test_normalize_list_item_numbered() { + assert_eq!(normalize_list_item("1. /src/main.rs"), "/src/main.rs"); + assert_eq!(normalize_list_item("1) /src/main.rs"), "/src/main.rs"); + assert_eq!(normalize_list_item("12. 
/src/main.rs"), "/src/main.rs"); + assert_eq!(normalize_list_item(" 3) /src/main.rs"), "/src/main.rs"); + } + + #[test] + fn test_normalize_list_item_backticks() { + assert_eq!(normalize_list_item("`/src/main.rs`"), "/src/main.rs"); + assert_eq!(normalize_list_item("- `/src/main.rs`"), "/src/main.rs"); + assert_eq!(normalize_list_item("1. `/src/main.rs`"), "/src/main.rs"); + } + + #[test] + fn test_normalize_list_item_quotes() { + assert_eq!(normalize_list_item("\"/src/main.rs\""), "/src/main.rs"); + assert_eq!(normalize_list_item("'/src/main.rs'"), "/src/main.rs"); + } + + #[test] + fn test_normalize_list_item_msg_id() { + assert_eq!(normalize_list_item("- MSG_ID:5"), "MSG_ID:5"); + assert_eq!(normalize_list_item("1) MSG_ID:12"), "MSG_ID:12"); + } + + #[test] + fn test_format_conversation_entry_user() { + let metadata = ConversationMetadata::default(); + let msg = ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText("Please help me with this code".to_string()), + ..Default::default() + }; + let result = format_conversation_entry(&msg, &metadata); + assert!(result.contains("### 👤 User")); + assert!(result.contains("Please help me with this code")); + } + + #[test] + fn test_format_conversation_entry_assistant_with_tools() { + use crate::call_validation::{ChatToolCall, ChatToolFunction}; + let metadata = ConversationMetadata::default(); + let msg = ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("I'll search for the file.".to_string()), + tool_calls: Some(vec![ChatToolCall { + id: "call_123".to_string(), + index: None, + function: ChatToolFunction { + name: "search".to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }]), + ..Default::default() + }; + let result = format_conversation_entry(&msg, &metadata); + assert!(result.contains("### 🤖 Assistant")); + assert!(result.contains("`search({})`")); + assert!(result.contains("I'll search for the file.")); + } + + #[test] + fn test_format_conversation_entry_skips_context_file() { + let metadata = ConversationMetadata::default(); + let msg = ChatMessage { + role: "context_file".to_string(), + content: ChatContent::SimpleText("file content".to_string()), + ..Default::default() + }; + let result = format_conversation_entry(&msg, &metadata); + assert!(result.is_empty()); + } + + #[test] + fn test_format_conversation_entry_tool_resolves_name() { + use crate::call_validation::{ChatToolCall, ChatToolFunction}; + let metadata = ConversationMetadata { + annotated_messages: vec![ + ("MSG_ID:0".to_string(), ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("Let me search.".to_string()), + tool_calls: Some(vec![ChatToolCall { + id: "call_abc".to_string(), + index: None, + function: ChatToolFunction { + name: "grep".to_string(), + arguments: r#"{"query":"test"}"#.to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }]), + ..Default::default() + }), + ("MSG_ID:1".to_string(), ChatMessage { + role: "tool".to_string(), + tool_call_id: "call_abc".to_string(), + content: ChatContent::SimpleText("Found 3 results".to_string()), + ..Default::default() + }), + ], + ..Default::default() + }; + let tool_msg = &metadata.annotated_messages[1].1; + let result = format_conversation_entry(tool_msg, &metadata); + assert!(result.contains("### 🔧 Tool: `grep`")); + assert!(result.contains("Found 3 results")); + } + + #[test] + fn test_messages_to_preserve_sorted_by_index() { + let decisions = 
parse_llm_response(r#" +Test summary + + +MSG_ID:10 +MSG_ID:2 +MSG_ID:5 +MSG_ID:2 + + + + +Continue +"#); + assert_eq!(decisions.messages_to_preserve, vec!["MSG_ID:10", "MSG_ID:2", "MSG_ID:5", "MSG_ID:2"]); + } + + #[test] + fn test_find_task_done_report() { + use crate::call_validation::{ChatToolCall, ChatToolFunction}; + + let messages = vec![ + ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText("Do the task".to_string()), + ..Default::default() + }, + ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("I'll complete this task.".to_string()), + tool_calls: Some(vec![ChatToolCall { + id: "call_123".to_string(), + index: None, + function: ChatToolFunction { + name: "task_done".to_string(), + arguments: r#"{"report": "Detailed report here", "summary": "All done"}"#.to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }]), + ..Default::default() + }, + ChatMessage { + role: "tool".to_string(), + tool_call_id: "call_123".to_string(), + content: ChatContent::SimpleText( + r#"{"type":"task_done","summary":"All done","report":"Detailed report here","files_changed":["src/main.rs"]}"#.to_string() + ), + ..Default::default() + }, + ]; + + let report = find_task_done_report(&messages); + assert!(report.is_some()); + let report_text = report.unwrap(); + assert!(report_text.contains("**All done**")); + assert!(report_text.contains("Detailed report here")); + assert!(report_text.contains("`src/main.rs`")); + } + + #[test] + fn test_find_task_done_report_no_task_done() { + let messages = vec![ + ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText("Hello".to_string()), + ..Default::default() + }, + ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("Hi there!".to_string()), + ..Default::default() + }, + ]; + + let report = find_task_done_report(&messages); + assert!(report.is_none()); + } +} diff --git a/refact-agent/engine/src/ast/ast_db.rs b/refact-agent/engine/src/ast/ast_db.rs index e69b150cb..56e5da418 100644 --- a/refact-agent/engine/src/ast/ast_db.rs +++ b/refact-agent/engine/src/ast/ast_db.rs @@ -10,7 +10,9 @@ use lazy_static::lazy_static; use regex::Regex; use crate::ast::ast_structs::{AstDB, AstDefinition, AstCounters, AstErrorStats}; -use crate::ast::ast_parse_anything::{parse_anything_and_add_file_path, filesystem_path_to_double_colon_path}; +use crate::ast::ast_parse_anything::{ + parse_anything_and_add_file_path, filesystem_path_to_double_colon_path, +}; use crate::custom_error::MapErrToString; use crate::fuzzy_search::fuzzy_search; @@ -59,7 +61,6 @@ use crate::fuzzy_search::fuzzy_search; // // Read tests below, the show what this index can do! - const MAX_DB_SIZE: usize = 10 * 1024 * 1024 * 1024; // 10GB const A_LOT_OF_PRINTS: bool = false; @@ -71,8 +72,7 @@ macro_rules! 
debug_print { }; } -pub async fn ast_index_init(ast_permanent: String, ast_max_files: usize) -> Arc -{ +pub async fn ast_index_init(ast_permanent: String, ast_max_files: usize) -> Arc { let db_temp_dir = if ast_permanent.is_empty() { Some(tempfile::TempDir::new().expect("Failed to create tempdir")) } else { @@ -85,19 +85,30 @@ pub async fn ast_index_init(ast_permanent: String, ast_max_files: usize) -> Arc< }; tracing::info!("starting AST db, ast_permanent={:?}", ast_permanent); - let db_env: Arc = Arc::new(task::spawn_blocking(move || { - let mut options = heed::EnvOpenOptions::new(); - options.map_size(MAX_DB_SIZE); - options.max_dbs(10); - unsafe { options.open(db_path).unwrap() } - }).await.unwrap()); - - let db: Arc> = Arc::new(db_env.write_txn().map(|mut txn| { - let db = db_env.create_database(&mut txn, Some("ast")).expect("Failed to create ast db"); - let _ = db.clear(&mut txn); - txn.commit().expect("Failed to commit to lmdb env"); - db - }).expect("Failed to start transaction to create ast db")); + let db_env: Arc = Arc::new( + task::spawn_blocking(move || { + let mut options = heed::EnvOpenOptions::new(); + options.map_size(MAX_DB_SIZE); + options.max_dbs(10); + unsafe { options.open(db_path).unwrap() } + }) + .await + .unwrap(), + ); + + let db: Arc> = Arc::new( + db_env + .write_txn() + .map(|mut txn| { + let db = db_env + .create_database(&mut txn, Some("ast")) + .expect("Failed to create ast db"); + let _ = db.clear(&mut txn); + txn.commit().expect("Failed to commit to lmdb env"); + db + }) + .expect("Failed to start transaction to create ast db"), + ); tracing::info!("/starting AST"); let ast_index = AstDB { @@ -109,18 +120,23 @@ pub async fn ast_index_init(ast_permanent: String, ast_max_files: usize) -> Arc< Arc::new(ast_index) } -pub fn fetch_counters(ast_index: Arc) -> Result -{ +pub fn fetch_counters(ast_index: Arc) -> Result { let txn = ast_index.db_env.read_txn().unwrap(); - let counter_defs = ast_index.db.get(&txn, "counters|defs") + let counter_defs = ast_index + .db + .get(&txn, "counters|defs") .map_err_with_prefix("Failed to get counters|defs")? .map(|v| serde_cbor::from_slice::(&v).unwrap()) .unwrap_or(0); - let counter_usages = ast_index.db.get(&txn, "counters|usages") + let counter_usages = ast_index + .db + .get(&txn, "counters|usages") .map_err_with_prefix("Failed to get counters|usages")? .map(|v| serde_cbor::from_slice::(&v).unwrap()) .unwrap_or(0); - let counter_docs = ast_index.db.get(&txn, "counters|docs") + let counter_docs = ast_index + .db + .get(&txn, "counters|docs") .map_err_with_prefix("Failed to get counters|docs")? 
.map(|v| serde_cbor::from_slice::(&v).unwrap()) .unwrap_or(0); @@ -131,15 +147,26 @@ pub fn fetch_counters(ast_index: Arc) -> Result }) } -fn increase_counter<'a>(ast_index: Arc, txn: &mut heed::RwTxn<'a>, counter_key: &str, adjustment: i32) { +fn increase_counter<'a>( + ast_index: Arc, + txn: &mut heed::RwTxn<'a>, + counter_key: &str, + adjustment: i32, +) { if adjustment == 0 { return; } - let new_value = ast_index.db.get(txn, counter_key) + let new_value = ast_index + .db + .get(txn, counter_key) .unwrap_or(None) .map(|v| serde_cbor::from_slice::(v).unwrap()) - .unwrap_or(0) + adjustment; - if let Err(e) = ast_index.db.put(txn, counter_key, &serde_cbor::to_vec(&new_value).unwrap()) { + .unwrap_or(0) + + adjustment; + if let Err(e) = ast_index + .db + .put(txn, counter_key, &serde_cbor::to_vec(&new_value).unwrap()) + { tracing::error!("failed to update counter: {:?}", e); } } @@ -149,10 +176,9 @@ pub async fn doc_add( cpath: &String, text: &String, errors: &mut AstErrorStats, -) -> Result<(Vec>, String), String> -{ +) -> Result<(Vec>, String), String> { let file_global_path = filesystem_path_to_double_colon_path(cpath); - let (defs, language) = parse_anything_and_add_file_path(&cpath, text, errors)?; // errors mostly "no such parser" here + let (defs, language) = parse_anything_and_add_file_path(&cpath, text, errors)?; // errors mostly "no such parser" here let result = ast_index.db_env.write_txn().and_then(|mut txn| { let mut added_defs: i32 = 0; @@ -165,7 +191,11 @@ pub async fn doc_add( let d_key = format!("d|{}", official_path); debug_print!("writing {}", d_key); ast_index.db.put(&mut txn, &d_key, &serialized)?; - let mut path_parts: Vec<&str> = definition.official_path.iter().map(|s| s.as_str()).collect(); + let mut path_parts: Vec<&str> = definition + .official_path + .iter() + .map(|s| s.as_str()) + .collect(); while !path_parts.is_empty() { let c_key = format!("c|{} ⚡ {}", path_parts.join("::"), official_path); ast_index.db.put(&mut txn, &c_key, b"")?; @@ -174,10 +204,23 @@ pub async fn doc_add( for usage in &definition.usages { if !usage.resolved_as.is_empty() { let u_key = format!("u|{} ⚡ {}", usage.resolved_as, official_path); - ast_index.db.put(&mut txn, &u_key, &serde_cbor::to_vec(&usage.uline).unwrap())?; - } else if usage.targets_for_guesswork.len() == 1 && !usage.targets_for_guesswork[0].starts_with("?::") { - let homeless_key = format!("homeless|{} ⚡ {}", usage.targets_for_guesswork[0], official_path); - ast_index.db.put(&mut txn, &homeless_key, &serde_cbor::to_vec(&usage.uline).unwrap())?; + ast_index.db.put( + &mut txn, + &u_key, + &serde_cbor::to_vec(&usage.uline).unwrap(), + )?; + } else if usage.targets_for_guesswork.len() == 1 + && !usage.targets_for_guesswork[0].starts_with("?::") + { + let homeless_key = format!( + "homeless|{} ⚡ {}", + usage.targets_for_guesswork[0], official_path + ); + ast_index.db.put( + &mut txn, + &homeless_key, + &serde_cbor::to_vec(&usage.uline).unwrap(), + )?; debug_print!(" homeless {}", homeless_key); continue; } else { @@ -188,13 +231,17 @@ pub async fn doc_add( // this_is_a_class: cpp🔎CosmicGoat, derived_from: "cpp🔎Goat" "cpp🔎CosmicJustice" for from in &definition.this_class_derived_from { let t_key = format!("classes|{} ⚡ {}", from, official_path); - ast_index.db.put(&mut txn, &t_key, &definition.this_is_a_class.as_bytes())?; + ast_index + .db + .put(&mut txn, &t_key, &definition.this_is_a_class.as_bytes())?; } added_defs += 1; } if unresolved_usages > 0 { let resolve_todo_key = format!("resolve-todo|{}", 
file_global_path.join("::")); - ast_index.db.put(&mut txn, &resolve_todo_key, &cpath.as_bytes())?; + ast_index + .db + .put(&mut txn, &resolve_todo_key, &cpath.as_bytes())?; } let doc_key = format!("doc-cpath|{}", file_global_path.join("::")); if ast_index.db.get(&txn, &doc_key)?.is_none() { @@ -214,8 +261,7 @@ pub async fn doc_add( Ok((defs.into_iter().map(Arc::new).collect(), language)) } -pub fn doc_remove(ast_index: Arc, cpath: &String) -> () -{ +pub fn doc_remove(ast_index: Arc, cpath: &String) -> () { let file_global_path = filesystem_path_to_double_colon_path(cpath); let d_prefix = format!("d|{}::", file_global_path.join("::")); @@ -228,7 +274,11 @@ pub fn doc_remove(ast_index: Arc, cpath: &String) -> () let mut cursor = ast_index.db.prefix_iter(&txn, &d_prefix)?; while let Some(Ok((d_key, value))) = cursor.next() { if let Ok(definition) = serde_cbor::from_slice::(&value) { - let mut path_parts: Vec<&str> = definition.official_path.iter().map(|s| s.as_str()).collect(); + let mut path_parts: Vec<&str> = definition + .official_path + .iter() + .map(|s| s.as_str()) + .collect(); let official_path = definition.official_path.join("::"); while !path_parts.is_empty() { let c_key = format!("c|{} ⚡ {}", path_parts.join("::"), official_path); @@ -239,8 +289,13 @@ pub fn doc_remove(ast_index: Arc, cpath: &String) -> () if !usage.resolved_as.is_empty() { let u_key = format!("u|{} ⚡ {}", usage.resolved_as, official_path); keys_to_remove.push(u_key); - } else if usage.targets_for_guesswork.len() == 1 && !usage.targets_for_guesswork[0].starts_with("?::") { - let homeless_key = format!("homeless|{} ⚡ {}", usage.targets_for_guesswork[0], official_path); + } else if usage.targets_for_guesswork.len() == 1 + && !usage.targets_for_guesswork[0].starts_with("?::") + { + let homeless_key = format!( + "homeless|{} ⚡ {}", + usage.targets_for_guesswork[0], official_path + ); debug_print!(" homeless {}", homeless_key); keys_to_remove.push(homeless_key); continue; @@ -251,14 +306,20 @@ pub fn doc_remove(ast_index: Arc, cpath: &String) -> () let t_key = format!("classes|{} ⚡ {}", from, official_path); keys_to_remove.push(t_key); } - let cleanup_key = format!("resolve-cleanup|{}", definition.official_path.join("::")); + let cleanup_key = + format!("resolve-cleanup|{}", definition.official_path.join("::")); if let Ok(Some(cleanup_value)) = ast_index.db.get(&txn, &cleanup_key) { - if let Ok(all_saved_ulinks) = serde_cbor::from_slice::>(&cleanup_value) { + if let Ok(all_saved_ulinks) = + serde_cbor::from_slice::>(&cleanup_value) + { for ulink in all_saved_ulinks { keys_to_remove.push(ulink); } } else { - tracing::error!("failed to deserialize cleanup_value for key: {}", cleanup_key); + tracing::error!( + "failed to deserialize cleanup_value for key: {}", + cleanup_key + ); } keys_to_remove.push(cleanup_key); } @@ -278,10 +339,15 @@ pub fn doc_remove(ast_index: Arc, cpath: &String) -> () let doc_key = format!("doc-cpath|{}", file_global_path.join("::")); if ast_index.db.get(&txn, &doc_key)?.is_some() { increase_counter(ast_index.clone(), &mut txn, "counters|docs", -1); - ast_index.db.delete(&mut txn, &doc_key)?; + ast_index.db.delete(&mut txn, &doc_key)?; } increase_counter(ast_index.clone(), &mut txn, "counters|defs", -deleted_defs); - increase_counter(ast_index.clone(), &mut txn, "counters|usages", -deleted_usages); + increase_counter( + ast_index.clone(), + &mut txn, + "counters|usages", + -deleted_usages, + ); txn.commit() }); @@ -291,8 +357,7 @@ pub fn doc_remove(ast_index: Arc, cpath: &String) -> () } } -pub fn 
doc_defs(ast_index: Arc, cpath: &String) -> Vec> -{ +pub fn doc_defs(ast_index: Arc, cpath: &String) -> Vec> { match ast_index.db_env.read_txn() { Ok(txn) => doc_defs_internal(ast_index.clone(), &txn, cpath), Err(e) => { @@ -302,15 +367,22 @@ pub fn doc_defs(ast_index: Arc, cpath: &String) -> Vec } } -pub fn doc_defs_internal<'a>(ast_index: Arc, txn: &RoTxn<'a>, cpath: &String) -> Vec> { - let d_prefix = format!("d|{}::", filesystem_path_to_double_colon_path(cpath).join("::")); +pub fn doc_defs_internal<'a>( + ast_index: Arc, + txn: &RoTxn<'a>, + cpath: &String, +) -> Vec> { + let d_prefix = format!( + "d|{}::", + filesystem_path_to_double_colon_path(cpath).join("::") + ); let mut defs = Vec::new(); let mut cursor = match ast_index.db.prefix_iter(txn, &d_prefix) { Ok(cursor) => cursor, Err(e) => { tracing::error!("Failed to open prefix iterator: {:?}", e); return Vec::new(); - }, + } }; while let Some(Ok((_, value))) = cursor.next() { if let Ok(definition) = serde_cbor::from_slice::(&value) { @@ -336,7 +408,9 @@ pub async fn doc_usages(ast_index: Arc, cpath: &String) -> Vec<(usize, St let doc_resolved_key = format!("doc-resolved|{}", file_global_path.join("::")); if let Ok(txn) = ast_index.db_env.read_txn() { if let Ok(Some(resolved_usages)) = ast_index.db.get(&txn, &doc_resolved_key) { - if let Ok(resolved_usages_vec) = serde_cbor::from_slice::>(&resolved_usages) { + if let Ok(resolved_usages_vec) = + serde_cbor::from_slice::>(&resolved_usages) + { usages.extend(resolved_usages_vec); } } @@ -369,13 +443,19 @@ impl Default for ConnectUsageContext { } } -pub fn connect_usages(ast_index: Arc, ucx: &mut ConnectUsageContext) -> Result -{ - let mut txn = ast_index.db_env.write_txn() +pub fn connect_usages( + ast_index: Arc, + ucx: &mut ConnectUsageContext, +) -> Result { + let mut txn = ast_index + .db_env + .write_txn() .map_err_with_prefix("Failed to open transaction:")?; let (todo_key, todo_value) = { - let mut cursor = ast_index.db.prefix_iter(&txn, "resolve-todo|") + let mut cursor = ast_index + .db + .prefix_iter(&txn, "resolve-todo|") .map_err_with_prefix("Failed to open db prefix iterator:")?; if let Some(Ok((todo_key, todo_value))) = cursor.next() { (todo_key.to_string(), todo_value.to_vec()) @@ -388,7 +468,10 @@ pub fn connect_usages(ast_index: Arc, ucx: &mut ConnectUsageContext) -> R let cpath = String::from_utf8(todo_value.to_vec()).unwrap(); debug_print!("resolving {}", cpath); - ast_index.db.delete(&mut txn, &todo_key).map_err_with_prefix("Failed to delete resolve-todo| key")?; + ast_index + .db + .delete(&mut txn, &todo_key) + .map_err_with_prefix("Failed to delete resolve-todo| key")?; let definitions = doc_defs_internal(ast_index.clone(), &txn, &cpath); @@ -398,35 +481,45 @@ pub fn connect_usages(ast_index: Arc, ucx: &mut ConnectUsageContext) -> R resolved_usages.extend(tmp); } - ast_index.db.put( - &mut txn, - &format!("doc-resolved|{}", global_file_path), - &serde_cbor::to_vec(&resolved_usages).unwrap(), - ).map_err_with_prefix("Failed to insert doc-resolved:")?; + ast_index + .db + .put( + &mut txn, + &format!("doc-resolved|{}", global_file_path), + &serde_cbor::to_vec(&resolved_usages).unwrap(), + ) + .map_err_with_prefix("Failed to insert doc-resolved:")?; - txn.commit().map_err_with_prefix("Failed to commit transaction:")?; + txn.commit() + .map_err_with_prefix("Failed to commit transaction:")?; Ok(true) } -pub fn connect_usages_look_if_full_reset_needed(ast_index: Arc) -> Result -{ +pub fn connect_usages_look_if_full_reset_needed( + ast_index: Arc, +) -> Result { let 
class_hierarchy_key = "class-hierarchy|"; let new_derived_from_map = _derived_from(ast_index.clone()).unwrap_or_default(); - let mut txn = ast_index.db_env.write_txn() + let mut txn = ast_index + .db_env + .write_txn() .map_err(|e| format!("Failed to create write transaction: {:?}", e))?; - let existing_hierarchy: IndexMap> = match ast_index.db.get(&txn, class_hierarchy_key) { - Ok(Some(value)) => serde_cbor::from_slice(value).unwrap_or_default(), - Ok(None) => IndexMap::new(), - Err(e) => return Err(format!("Failed to get class hierarchy: {:?}", e)) - }; + let existing_hierarchy: IndexMap> = + match ast_index.db.get(&txn, class_hierarchy_key) { + Ok(Some(value)) => serde_cbor::from_slice(value).unwrap_or_default(), + Ok(None) => IndexMap::new(), + Err(e) => return Err(format!("Failed to get class hierarchy: {:?}", e)), + }; if existing_hierarchy.is_empty() { let serialized_hierarchy = serde_cbor::to_vec(&new_derived_from_map).unwrap(); - ast_index.db.put(&mut txn, class_hierarchy_key, &serialized_hierarchy) + ast_index + .db + .put(&mut txn, class_hierarchy_key, &serialized_hierarchy) .map_err_with_prefix("Failed to put class_hierarchy in db:")?; // First run, serialize and store the new hierarchy } else if new_derived_from_map != existing_hierarchy { @@ -434,13 +527,17 @@ pub fn connect_usages_look_if_full_reset_needed(ast_index: Arc) -> Result existing_hierarchy.len(), new_derived_from_map.len()); let serialized_hierarchy = serde_cbor::to_vec(&new_derived_from_map).unwrap(); - ast_index.db.put(&mut txn, class_hierarchy_key, &serialized_hierarchy) + ast_index + .db + .put(&mut txn, class_hierarchy_key, &serialized_hierarchy) .map_err(|e| format!("Failed to put class hierarchy: {:?}", e))?; let mut keys_to_update = Vec::new(); { - let mut cursor = ast_index.db.prefix_iter(&txn, "doc-cpath|") + let mut cursor = ast_index + .db + .prefix_iter(&txn, "doc-cpath|") .map_err(|e| format!("Failed to create prefix iterator: {:?}", e))?; while let Some(Ok((key, value))) = cursor.next() { @@ -456,12 +553,15 @@ pub fn connect_usages_look_if_full_reset_needed(ast_index: Arc) -> Result tracing::info!("adding {} items to resolve-todo", keys_to_update.len()); for (key, cpath) in keys_to_update { - ast_index.db.put(&mut txn, &key, cpath.as_bytes()) + ast_index + .db + .put(&mut txn, &key, cpath.as_bytes()) .map_err_with_prefix("Failed to put db key to resolve-todo:")?; } } - txn.commit().map_err(|e| format!("Failed to commit transaction: {:?}", e))?; + txn.commit() + .map_err(|e| format!("Failed to commit transaction: {:?}", e))?; Ok(ConnectUsageContext { derived_from_map: new_derived_from_map, @@ -515,7 +615,12 @@ fn _connect_usages_helper<'a>( let mut result = Vec::<(usize, String)>::new(); let mut all_saved_ulinks = Vec::::new(); for (uindex, usage) in definition.usages.iter().enumerate() { - debug_print!(" resolving {}.usage[{}] == {:?}", official_path, uindex, usage); + debug_print!( + " resolving {}.usage[{}] == {:?}", + official_path, + uindex, + usage + ); if !usage.resolved_as.is_empty() { ucx.usages_connected += 1; continue; @@ -528,7 +633,12 @@ fn _connect_usages_helper<'a>( } let to_resolve = to_resolve_unstripped.strip_prefix("?::").unwrap(); // println!("to_resolve_unstripped {:?}", to_resolve_unstripped); - debug_print!(" to resolve {}.usage[{}] guessing {}", official_path, uindex, to_resolve); + debug_print!( + " to resolve {}.usage[{}] guessing {}", + official_path, + uindex, + to_resolve + ); // Extract all LANGUAGE🔎CLASS from to_resolve let mut magnifying_glass_pairs = Vec::new(); @@ 
-544,13 +654,31 @@ fn _connect_usages_helper<'a>( if magnifying_glass_pairs.len() == 0 { variants.push(to_resolve.to_string()); } else { - let substitutions_of_each_pair: Vec> = magnifying_glass_pairs.iter().map(|(language, klass)| { - let mut substitutions = ucx.derived_from_map.get(format!("{}🔎{}", language, klass).as_str()).cloned().unwrap_or_else(|| vec![]); - substitutions.insert(0, klass.clone()); - substitutions.iter().map(|s| s.strip_prefix(&format!("{}🔎", language)).unwrap_or(s).to_string()).collect() - }).collect(); - - fn generate_combinations(substitutions: &[Vec], index: usize, current: Vec) -> Vec> { + let substitutions_of_each_pair: Vec> = magnifying_glass_pairs + .iter() + .map(|(language, klass)| { + let mut substitutions = ucx + .derived_from_map + .get(format!("{}🔎{}", language, klass).as_str()) + .cloned() + .unwrap_or_else(|| vec![]); + substitutions.insert(0, klass.clone()); + substitutions + .iter() + .map(|s| { + s.strip_prefix(&format!("{}🔎", language)) + .unwrap_or(s) + .to_string() + }) + .collect() + }) + .collect(); + + fn generate_combinations( + substitutions: &[Vec], + index: usize, + current: Vec, + ) -> Vec> { if index == substitutions.len() { return vec![current]; } @@ -562,7 +690,8 @@ fn _connect_usages_helper<'a>( } result } - let intermediate_results = generate_combinations(&substitutions_of_each_pair, 0, Vec::new()); + let intermediate_results = + generate_combinations(&substitutions_of_each_pair, 0, Vec::new()); // Transform each something::LANGUAGE🔎CLASS::something into something::class::something for intermediate_result in intermediate_results { let mut variant = template.clone(); @@ -583,7 +712,9 @@ fn _connect_usages_helper<'a>( let c_prefix = format!("c|{}", v); debug_print!(" scanning {}", c_prefix); // println!(" c_prefix {:?} because v={:?}", c_prefix, v); - let mut c_iter = ast_index.db.prefix_iter(txn, &c_prefix) + let mut c_iter = ast_index + .db + .prefix_iter(txn, &c_prefix) .map_err_with_prefix("Failed to open db range iter:")?; while let Some(Ok((c_key, _))) = c_iter.next() { let parts: Vec<&str> = c_key.split(" ⚡ ").collect(); @@ -605,38 +736,49 @@ fn _connect_usages_helper<'a>( continue; } if found.len() > 1 { - ucx.errstats.add_error(definition.cpath.clone(), usage.uline, &format!("usage `{}` is ambiguous, can mean: {:?}", to_resolve, found)); + ucx.errstats.add_error( + definition.cpath.clone(), + usage.uline, + &format!("usage `{}` is ambiguous, can mean: {:?}", to_resolve, found), + ); ucx.usages_ambiguous += 1; found.truncate(1); } let single_thing_found = found.into_iter().next().unwrap(); let u_key = format!("u|{} ⚡ {}", single_thing_found, official_path); - ast_index.db.put(txn, &u_key, &serde_cbor::to_vec(&usage.uline).unwrap()) + ast_index + .db + .put(txn, &u_key, &serde_cbor::to_vec(&usage.uline).unwrap()) .map_err_with_prefix("Failed to insert key in db:")?; debug_print!(" add {:?} <= {}", u_key, usage.uline); all_saved_ulinks.push(u_key); result.push((usage.uline, single_thing_found)); ucx.usages_connected += 1; - break; // the next thing from targets_for_guesswork is a worse query, keep this one and exit + break; // the next thing from targets_for_guesswork is a worse query, keep this one and exit } } // for usages let cleanup_key = format!("resolve-cleanup|{}", definition.official_path.join("::")); let cleanup_value = serde_cbor::to_vec(&all_saved_ulinks).unwrap(); - ast_index.db.put(txn, &cleanup_key, cleanup_value.as_slice()) + ast_index + .db + .put(txn, &cleanup_key, cleanup_value.as_slice()) 
.map_err_with_prefix("Failed to insert key in db:")?; Ok(result) } -fn _derived_from(ast_index: Arc) -> Result>, String> -{ +fn _derived_from(ast_index: Arc) -> Result>, String> { // Data example: // classes/cpp🔎Animal ⚡ alt_testsuite::cpp_goat_library::Goat 👉 "cpp🔎Goat" let mut derived_map: IndexMap> = IndexMap::new(); let t_prefix = "classes|"; { - let txn = ast_index.db_env.read_txn() + let txn = ast_index + .db_env + .read_txn() .map_err(|e| format!("Failed to create read transaction: {:?}", e))?; - let mut cursor = ast_index.db.prefix_iter(&txn, t_prefix) + let mut cursor = ast_index + .db + .prefix_iter(&txn, t_prefix) .map_err(|e| format!("Failed to create prefix iterator: {:?}", e))?; while let Some(Ok((key, value))) = cursor.next() { @@ -644,7 +786,11 @@ fn _derived_from(ast_index: Arc) -> Result>, let parts: Vec<&str> = key.split(" ⚡ ").collect(); if parts.len() == 2 { - let parent = parts[0].trim().strip_prefix(t_prefix).unwrap_or(parts[0].trim()).to_string(); + let parent = parts[0] + .trim() + .strip_prefix(t_prefix) + .unwrap_or(parts[0].trim()) + .to_string(); let child = value_string.trim().to_string(); let entry = derived_map.entry(child).or_insert_with(Vec::new); if !entry.contains(&parent) { @@ -672,7 +818,8 @@ fn _derived_from(ast_index: Arc) -> Result>, if let Some(parents) = derived_map.get(klass) { for parent in parents { all_parents.push(parent.clone()); - let ancestors = build_all_derived_from(parent, derived_map, all_derived_from, visited); + let ancestors = + build_all_derived_from(parent, derived_map, all_derived_from, visited); for ancestor in ancestors { if !all_parents.contains(&ancestor) { all_parents.push(ancestor); @@ -692,55 +839,22 @@ fn _derived_from(ast_index: Arc) -> Result>, Ok(all_derived_from) } -/// The best way to get full_official_path is to call definitions() first -pub fn usages(ast_index: Arc, full_official_path: String, limit_n: usize) -> Result, usize)>, String> -{ - let mut usages = Vec::new(); - let u_prefix1 = format!("u|{} ", full_official_path); // this one has space - let u_prefix2 = format!("u|{}", full_official_path); - - let txn = ast_index.db_env.read_txn() - .map_err(|e| format!("Failed to create read transaction: {:?}", e))?; - - let mut cursor = ast_index.db.prefix_iter(&txn, &u_prefix1) - .map_err(|e| format!("Failed to create prefix iterator: {:?}", e))?; - - while let Some(Ok((u_key, u_value))) = cursor.next() { - if usages.len() >= limit_n { - break; - } - - let parts: Vec<&str> = u_key.split(" ⚡ ").collect(); - if parts.len() == 2 && parts[0] == u_prefix2 { - let full_path = parts[1].trim(); - let d_key = format!("d|{}", full_path); - - if let Ok(Some(d_value)) = ast_index.db.get(&txn, &d_key) { - let uline = serde_cbor::from_slice::(&u_value).unwrap_or(0); - - match serde_cbor::from_slice::(&d_value) { - Ok(defintion) => usages.push((Arc::new(defintion), uline)), - Err(e) => tracing::error!("Failed to deserialize value for {}: {:?}", d_key, e), - } - } - } else if parts.len() != 2 { - tracing::error!("usage record has more than two ⚡ key was: {}", u_key); - } - } - - Ok(usages) -} - -pub fn definitions(ast_index: Arc, double_colon_path: &str) -> Result>, String> -{ +pub fn definitions( + ast_index: Arc, + double_colon_path: &str, +) -> Result>, String> { let c_prefix1 = format!("c|{} ", double_colon_path); // has space let c_prefix2 = format!("c|{}", double_colon_path); - let txn = ast_index.db_env.read_txn() + let txn = ast_index + .db_env + .read_txn() .map_err_with_prefix("Failed to create read transaction:")?; let mut 
path_groups: HashMap> = HashMap::new(); - let mut cursor = ast_index.db.prefix_iter(&txn, &c_prefix1) + let mut cursor = ast_index + .db + .prefix_iter(&txn, &c_prefix1) .map_err_with_prefix("Failed to create db prefix iterator:")?; while let Some(Ok((key, _))) = cursor.next() { if key.contains(" ⚡ ") { @@ -748,7 +862,10 @@ pub fn definitions(ast_index: Arc, double_colon_path: &str) -> Result, double_colon_path: &str) -> Result(&d_value) { Ok(definition) => defs.push(Arc::new(definition)), - Err(e) => return Err(format!("Failed to deserialize value for {}: {:?}", d_key, e)), + Err(e) => { + return Err(format!( + "Failed to deserialize value for {}: {:?}", + d_key, e + )) + } } } } @@ -773,8 +895,11 @@ pub fn definitions(ast_index: Arc, double_colon_path: &str) -> Result, language: String, subtree_of: String) -> Result -{ +pub fn type_hierarchy( + ast_index: Arc, + language: String, + subtree_of: String, +) -> Result { // Data example: // classes/cpp🔎Animal ⚡ alt_testsuite::cpp_goat_library::Goat 👉 "cpp🔎Goat" // classes/cpp🔎CosmicJustice ⚡ alt_testsuite::cpp_goat_main::CosmicGoat 👉 "cpp🔎CosmicGoat" @@ -797,9 +922,13 @@ pub fn type_hierarchy(ast_index: Arc, language: String, subtree_of: Strin let mut hierarchy_map: IndexMap> = IndexMap::new(); { - let txn = ast_index.db_env.read_txn() + let txn = ast_index + .db_env + .read_txn() .map_err_with_prefix("Failed to create read transaction:")?; - let mut cursor = ast_index.db.prefix_iter(&txn, &t_prefix) + let mut cursor = ast_index + .db + .prefix_iter(&txn, &t_prefix) .map_err_with_prefix("Failed to create prefix iterator:")?; while let Some(Ok((key, value))) = cursor.next() { @@ -807,15 +936,27 @@ pub fn type_hierarchy(ast_index: Arc, language: String, subtree_of: Strin if key.contains(" ⚡ ") { let parts: Vec<&str> = key.split(" ⚡ ").collect(); if parts.len() == 2 { - let parent = parts[0].trim().strip_prefix("classes|").unwrap_or(parts[0].trim()).to_string(); + let parent = parts[0] + .trim() + .strip_prefix("classes|") + .unwrap_or(parts[0].trim()) + .to_string(); let child = value_string.trim().to_string(); - hierarchy_map.entry(parent).or_insert_with(Vec::new).push(child); + hierarchy_map + .entry(parent) + .or_insert_with(Vec::new) + .push(child); } } } } - fn build_hierarchy(hierarchy_map: &IndexMap>, node: &str, indent: usize, language: &str) -> String { + fn build_hierarchy( + hierarchy_map: &IndexMap>, + node: &str, + indent: usize, + language: &str, + ) -> String { let prefix = format!("{}🔎", language); let node_stripped = node.strip_prefix(&prefix).unwrap_or(node); let mut result = format!("{:indent$}{}\n", "", node_stripped, indent = indent); @@ -830,7 +971,10 @@ pub fn type_hierarchy(ast_index: Arc, language: String, subtree_of: Strin let mut result = String::new(); if subtree_of.is_empty() { for root in hierarchy_map.keys() { - if !hierarchy_map.values().any(|children| children.contains(root)) { + if !hierarchy_map + .values() + .any(|children| children.contains(root)) + { result.push_str(&build_hierarchy(&hierarchy_map, root, 0, &language)); } } @@ -841,7 +985,12 @@ pub fn type_hierarchy(ast_index: Arc, language: String, subtree_of: Strin Ok(result) } -pub async fn definition_paths_fuzzy(ast_index: Arc, pattern: &str, top_n: usize, max_candidates_to_consider: usize) -> Result, String> { +pub async fn definition_paths_fuzzy( + ast_index: Arc, + pattern: &str, + top_n: usize, + max_candidates_to_consider: usize, +) -> Result, String> { let mut candidates = HashSet::new(); let mut patterns_to_try = Vec::new(); @@ -859,11 +1008,15 
@@ pub async fn definition_paths_fuzzy(ast_index: Arc, pattern: &str, top_n: } { - let txn = ast_index.db_env.read_txn() + let txn = ast_index + .db_env + .read_txn() .map_err_with_prefix("Failed to create read transaction:")?; for pat in patterns_to_try { - let mut cursor = ast_index.db.prefix_iter(&txn, &format!("c|{}", pat)) + let mut cursor = ast_index + .db + .prefix_iter(&txn, &format!("c|{}", pat)) .map_err_with_prefix("Failed to create prefix iterator:")?; while let Some(Ok((key, _))) = cursor.next() { if let Some((_, dest)) = key.split_once(" ⚡ ") { @@ -881,7 +1034,8 @@ pub async fn definition_paths_fuzzy(ast_index: Arc, pattern: &str, top_n: let results = fuzzy_search(&pattern.to_string(), candidates, top_n, &[':']); - Ok(results.into_iter() + Ok(results + .into_iter() .map(|result| { if let Some(pos) = result.find("::") { result[pos + 2..].to_string() @@ -893,13 +1047,19 @@ pub async fn definition_paths_fuzzy(ast_index: Arc, pattern: &str, top_n: } #[allow(dead_code)] -pub fn dump_database(ast_index: Arc) -> Result -{ - let txn = ast_index.db_env.read_txn() +pub fn dump_database(ast_index: Arc) -> Result { + let txn = ast_index + .db_env + .read_txn() .map_err_with_prefix("Failed to create read transaction:")?; - let db_len = ast_index.db.len(&txn).map_err_with_prefix("Failed to count records:")?; + let db_len = ast_index + .db + .len(&txn) + .map_err_with_prefix("Failed to count records:")?; println!("\ndb has {db_len} records"); - let iter = ast_index.db.iter(&txn) + let iter = ast_index + .db + .iter(&txn) .map_err_with_prefix("Failed to create iterator:")?; for item in iter { let (key, value) = item.map_err_with_prefix("Failed to get item:")?; @@ -924,7 +1084,6 @@ pub fn dump_database(ast_index: Arc) -> Result Ok(db_len) } - #[cfg(test)] mod tests { use super::*; @@ -957,14 +1116,32 @@ mod tests { let library_text = read_file(library_file_path); let main_text = read_file(main_file_path); - doc_add(ast_index.clone(), &library_file_path.to_string(), &library_text, &mut errstats).await.unwrap(); - doc_add(ast_index.clone(), &main_file_path.to_string(), &main_text, &mut errstats).await.unwrap(); + doc_add( + ast_index.clone(), + &library_file_path.to_string(), + &library_text, + &mut errstats, + ) + .await + .unwrap(); + doc_add( + ast_index.clone(), + &main_file_path.to_string(), + &main_text, + &mut errstats, + ) + .await + .unwrap(); for error in errstats.errors { - println!("(E) {}:{} {}", error.err_cpath, error.err_line, error.err_message); + println!( + "(E) {}:{} {}", + error.err_cpath, error.err_line, error.err_message + ); } - let mut ucx: ConnectUsageContext = connect_usages_look_if_full_reset_needed(ast_index.clone()).unwrap(); + let mut ucx: ConnectUsageContext = + connect_usages_look_if_full_reset_needed(ast_index.clone()).unwrap(); loop { let did_anything = connect_usages(ast_index.clone(), &mut ucx).unwrap(); if !did_anything { @@ -974,7 +1151,8 @@ mod tests { let _ = dump_database(ast_index.clone()).unwrap(); - let hierarchy = type_hierarchy(ast_index.clone(), language.to_string(), "".to_string()).unwrap(); + let hierarchy = + type_hierarchy(ast_index.clone(), language.to_string(), "".to_string()).unwrap(); println!("Type hierarchy:\n{}", hierarchy); let expected_hierarchy = "Animal\n Goat\n CosmicGoat\nCosmicJustice\n CosmicGoat\n"; assert_eq!( @@ -983,7 +1161,12 @@ mod tests { ); println!( "Type hierachy subtree_of=Animal:\n{}", - type_hierarchy(ast_index.clone(), language.to_string(), format!("{}🔎Animal", language)).unwrap() + type_hierarchy( + 
ast_index.clone(), + language.to_string(), + format!("{}🔎Animal", language) + ) + .unwrap() ); // Goat::Goat() is a C++ constructor @@ -996,24 +1179,12 @@ mod tests { assert!(goat_def.len() == 1); let animalage_defs = definitions(ast_index.clone(), animal_age_location).unwrap(); - let animalage_def0 = animalage_defs.first().unwrap(); - let animalage_usage = usages(ast_index.clone(), animalage_def0.path(), 100).unwrap(); - let mut animalage_usage_str = String::new(); - for (used_at_def, used_at_uline) in animalage_usage.iter() { - animalage_usage_str.push_str(&format!("{:}:{}\n", used_at_def.cpath, used_at_uline)); - } - println!("animalage_usage_str:\n{}", animalage_usage_str); - assert!(animalage_usage.len() == 5); - - let goat_defs = definitions(ast_index.clone(), format!("{}_goat_library::Goat", language).as_str()).unwrap(); - let goat_def0 = goat_defs.first().unwrap(); - let goat_usage = usages(ast_index.clone(), goat_def0.path(), 100).unwrap(); - let mut goat_usage_str = String::new(); - for (used_at_def, used_at_uline) in goat_usage.iter() { - goat_usage_str.push_str(&format!("{:}:{}\n", used_at_def.cpath, used_at_uline)); - } - println!("goat_usage:\n{}", goat_usage_str); - assert!(goat_usage.len() == 1 || goat_usage.len() == 2); // derived from generates usages (new style: py) or not (old style) + + let goat_defs = definitions( + ast_index.clone(), + format!("{}_goat_library::Goat", language).as_str(), + ) + .unwrap(); doc_remove(ast_index.clone(), &library_file_path.to_string()); doc_remove(ast_index.clone(), &main_file_path.to_string()); @@ -1046,7 +1217,8 @@ mod tests { "Goat::Goat", "cpp", "Animal::age", - ).await; + ) + .await; } #[tokio::test] @@ -1060,6 +1232,7 @@ mod tests { "Goat::__init__", "py", "Animal::age", - ).await; + ) + .await; } } diff --git a/refact-agent/engine/src/ast/ast_indexer_thread.rs b/refact-agent/engine/src/ast/ast_indexer_thread.rs index 29723c5b3..212110133 100644 --- a/refact-agent/engine/src/ast/ast_indexer_thread.rs +++ b/refact-agent/engine/src/ast/ast_indexer_thread.rs @@ -10,8 +10,10 @@ use crate::files_in_workspace::Document; use crate::global_context::GlobalContext; use crate::ast::ast_structs::{AstDB, AstStatus, AstCounters, AstErrorStats}; -use crate::ast::ast_db::{ast_index_init, fetch_counters, doc_add, doc_remove, connect_usages, connect_usages_look_if_full_reset_needed}; - +use crate::ast::ast_db::{ + ast_index_init, fetch_counters, doc_add, doc_remove, connect_usages, + connect_usages_look_if_full_reset_needed, +}; pub struct AstIndexService { pub ast_index: Arc, @@ -43,7 +45,7 @@ async fn ast_indexer_thread( ast_service_locked.ast_sleeping_point.clone(), ) }; - let ast_max_files = ast_index.ast_max_files; // cannot change + let ast_max_files = ast_index.ast_max_files; // cannot change loop { let (cpath, left_todo_count) = { @@ -74,22 +76,43 @@ async fn ast_indexer_thread( break; } }; - let mut doc = Document { doc_path: cpath.clone().into(), doc_text: None }; + let mut doc = Document { + doc_path: cpath.clone().into(), + doc_text: None, + }; doc_remove(ast_index.clone(), &cpath); - match crate::files_in_workspace::get_file_text_from_memory_or_disk(gcx.clone(), &doc.doc_path).await { + match crate::files_in_workspace::get_file_text_from_memory_or_disk( + gcx.clone(), + &doc.doc_path, + ) + .await + { Ok(file_text) => { doc.update_text(&file_text); let mut error_message: Option = None; match doc.does_text_look_good() { Ok(_) => { let start_time = std::time::Instant::now(); - match doc_add(ast_index.clone(), &cpath, &file_text, &mut 
stats_parsing_errors).await { + match doc_add( + ast_index.clone(), + &cpath, + &file_text, + &mut stats_parsing_errors, + ) + .await + { Ok((defs, language)) => { let elapsed = start_time.elapsed().as_secs_f32(); if elapsed > 0.1 { - tracing::info!("{}/{} doc_add {:.3?}s {}", stats_parsed_cnt, (stats_parsed_cnt+left_todo_count), elapsed, crate::nicer_logs::last_n_chars(&cpath, 40)); + tracing::info!( + "{}/{} doc_add {:.3?}s {}", + stats_parsed_cnt, + (stats_parsed_cnt + left_todo_count), + elapsed, + crate::nicer_logs::last_n_chars(&cpath, 40) + ); } stats_parsed_cnt += 1; stats_symbols_cnt += defs.len(); @@ -109,12 +132,18 @@ async fn ast_indexer_thread( } } Err(_e) => { - tracing::info!("deleting from index {} because cannot read it", crate::nicer_logs::last_n_chars(&cpath, 30)); - *stats_failure_reasons.entry("cannot read file".to_string()).or_insert(0) += 1; + tracing::info!( + "deleting from index {} because cannot read it", + crate::nicer_logs::last_n_chars(&cpath, 30) + ); + *stats_failure_reasons + .entry("cannot read file".to_string()) + .or_insert(0) += 1; } } - if stats_update_ts.elapsed() >= std::time::Duration::from_millis(1000) { // can't be lower, because flush_sled_batch() happens not very often at all + if stats_update_ts.elapsed() >= std::time::Duration::from_millis(1000) { + // can't be lower, because flush_sled_batch() happens not very often at all let counters = fetch_counters(ast_index.clone()).unwrap_or_else(trace_and_default); { let mut status_locked = ast_status.lock().await; @@ -143,7 +172,10 @@ async fn ast_indexer_thread( let display_count = std::cmp::min(5, error_count); let mut error_messages = String::new(); for error in &stats_parsing_errors.errors[..display_count] { - error_messages.push_str(&format!("(E) {}:{} {}\n", error.err_cpath, error.err_line, error.err_message)); + error_messages.push_str(&format!( + "(E) {}:{} {}\n", + error.err_cpath, error.err_line, error.err_message + )); } if error_count > 5 { error_messages.push_str(&format!("...and {} more", error_count - 5)); @@ -152,7 +184,8 @@ async fn ast_indexer_thread( stats_parsing_errors = AstErrorStats::default(); } if stats_parsed_cnt + stats_symbols_cnt > 0 { - info!("AST finished parsing, got {} symbols by processing {} files in {:>.3}s", + info!( + "AST finished parsing, got {} symbols by processing {} files in {:>.3}s", stats_symbols_cnt, stats_parsed_cnt, stats_t0.elapsed().as_secs_f64() @@ -161,7 +194,8 @@ async fn ast_indexer_thread( let language_stats: String = if stats_success_languages.is_empty() { "no files".to_string() } else { - stats_success_languages.iter() + stats_success_languages + .iter() .map(|(lang, count)| format!("{:>30} {}", lang, count)) .collect::>() .join("\n") @@ -169,7 +203,8 @@ async fn ast_indexer_thread( let problem_stats: String = if stats_failure_reasons.is_empty() { "no errors".to_string() } else { - stats_failure_reasons.iter() + stats_failure_reasons + .iter() .map(|(reason, count)| format!("{:>30} {}", reason, count)) .collect::>() .join("\n") @@ -187,7 +222,8 @@ async fn ast_indexer_thread( stats_parsed_cnt = 0; stats_symbols_cnt = 0; reported_parse_stats = true; - let counters: AstCounters = fetch_counters(ast_index.clone()).unwrap_or_else(trace_and_default); + let counters: AstCounters = + fetch_counters(ast_index.clone()).unwrap_or_else(trace_and_default); { let mut status_locked = ast_status.lock().await; status_locked.files_unparsed = 0; @@ -200,13 +236,15 @@ async fn ast_indexer_thread( } // Connect usages, unless we have files in the todo - let mut 
usagecx = connect_usages_look_if_full_reset_needed(ast_index.clone()).unwrap_or_else(trace_and_default); + let mut usagecx = connect_usages_look_if_full_reset_needed(ast_index.clone()) + .unwrap_or_else(trace_and_default); loop { todo_count = ast_service.lock().await.ast_todo.len(); if todo_count > 0 { break; } - let did_anything = connect_usages(ast_index.clone(), &mut usagecx).unwrap_or_else(trace_and_default); + let did_anything = + connect_usages(ast_index.clone(), &mut usagecx).unwrap_or_else(trace_and_default); if !did_anything { break; } @@ -217,14 +255,22 @@ async fn ast_indexer_thread( let display_count = std::cmp::min(5, error_count); let mut error_messages = String::new(); for error in &usagecx.errstats.errors[..display_count] { - error_messages.push_str(&format!("(U) {}:{} {}\n", error.err_cpath, error.err_line, error.err_message)); + error_messages.push_str(&format!( + "(U) {}:{} {}\n", + error.err_cpath, error.err_line, error.err_message + )); } if error_count > 5 { error_messages.push_str(&format!("...and {} more", error_count - 5)); } info!("AST connection graph errors:\n{}", error_messages); } - if usagecx.usages_connected + usagecx.usages_not_found + usagecx.usages_ambiguous + usagecx.usages_homeless > 0 { + if usagecx.usages_connected + + usagecx.usages_not_found + + usagecx.usages_ambiguous + + usagecx.usages_homeless + > 0 + { info!("AST connection graph stats: homeless={}, connected={}, not_found={}, ambiguous={} in {:.3}s", usagecx.usages_homeless, usagecx.usages_connected, @@ -240,7 +286,8 @@ async fn ast_indexer_thread( } if !reported_connect_stats { - let counters: AstCounters = fetch_counters(ast_index.clone()).unwrap_or_else(trace_and_default); + let counters: AstCounters = + fetch_counters(ast_index.clone()).unwrap_or_else(trace_and_default); { let mut status_locked = ast_status.lock().await; status_locked.files_unparsed = 0; @@ -258,12 +305,20 @@ async fn ast_indexer_thread( reported_connect_stats = true; } - tokio::time::timeout(tokio::time::Duration::from_secs(10), ast_sleeping_point.notified()).await.ok(); + tokio::time::timeout( + tokio::time::Duration::from_secs(10), + ast_sleeping_point.notified(), + ) + .await + .ok(); } } -pub async fn ast_indexer_block_until_finished(ast_service: Arc>, max_blocking_time_ms: usize, wake_up_indexer: bool) -> bool -{ +pub async fn ast_indexer_block_until_finished( + ast_service: Arc>, + max_blocking_time_ms: usize, + wake_up_indexer: bool, +) -> bool { let max_blocking_duration = tokio::time::Duration::from_millis(max_blocking_time_ms as u64); let start_time = std::time::Instant::now(); let ast_sleeping_point = { @@ -299,8 +354,10 @@ pub async fn ast_indexer_block_until_finished(ast_service: Arc Arc> -{ +pub async fn ast_service_init( + ast_permanent: String, + ast_max_files: usize, +) -> Arc> { let ast_index = ast_index_init(ast_permanent, ast_max_files).await; let ast_status = Arc::new(AMutex::new(AstStatus { astate_notify: Arc::new(ANotify::new()), @@ -310,7 +367,7 @@ pub async fn ast_service_init(ast_permanent: String, ast_max_files: usize) -> Ar ast_index_files_total: 0, ast_index_symbols_total: 0, ast_index_usages_total: 0, - ast_max_files_hit: false + ast_max_files_hit: false, })); let ast_service = AstIndexService { ast_sleeping_point: Arc::new(ANotify::new()), @@ -324,19 +381,19 @@ pub async fn ast_service_init(ast_permanent: String, ast_max_files: usize) -> Ar pub async fn ast_indexer_start( ast_service: Arc>, gcx: Arc>, -) -> Vec> -{ - let indexer_handle = tokio::spawn( - ast_indexer_thread( - 
Arc::downgrade(&gcx), - ast_service.clone(), - ) - ); +) -> Vec> { + let indexer_handle = tokio::spawn(ast_indexer_thread( + Arc::downgrade(&gcx), + ast_service.clone(), + )); return vec![indexer_handle]; } -pub async fn ast_indexer_enqueue_files(ast_service: Arc>, cpaths: &Vec, wake_up_indexer: bool) -{ +pub async fn ast_indexer_enqueue_files( + ast_service: Arc>, + cpaths: &Vec, + wake_up_indexer: bool, +) { let ast_status; let nonzero = cpaths.len() > 0; { diff --git a/refact-agent/engine/src/ast/ast_parse_anything.rs b/refact-agent/engine/src/ast/ast_parse_anything.rs index ca11a1085..8a5465fc1 100644 --- a/refact-agent/engine/src/ast/ast_parse_anything.rs +++ b/refact-agent/engine/src/ast/ast_parse_anything.rs @@ -8,29 +8,26 @@ use sha2::{Sha256, Digest}; use crate::ast::ast_structs::{AstDefinition, AstUsage, AstErrorStats}; use crate::ast::treesitter::parsers::get_ast_parser_by_filename; use crate::ast::treesitter::structs::SymbolType; -use crate::ast::treesitter::ast_instance_structs::{VariableUsage, VariableDefinition, AstSymbolInstance, FunctionDeclaration, StructDeclaration, FunctionCall, AstSymbolInstanceArc}; +use crate::ast::treesitter::ast_instance_structs::{ + VariableUsage, VariableDefinition, AstSymbolInstance, FunctionDeclaration, StructDeclaration, + FunctionCall, AstSymbolInstanceArc, +}; use crate::ast::parse_common::line12mid_from_ranges; - const TOO_MANY_SYMBOLS_IN_FILE: usize = 10000; fn _is_declaration(t: SymbolType) -> bool { match t { - SymbolType::Module | - SymbolType::StructDeclaration | - SymbolType::TypeAlias | - SymbolType::ClassFieldDeclaration | - SymbolType::ImportDeclaration | - SymbolType::VariableDefinition | - SymbolType::FunctionDeclaration | - SymbolType::CommentDefinition | - SymbolType::Unknown => { - true - } - SymbolType::FunctionCall | - SymbolType::VariableUsage => { - false - } + SymbolType::Module + | SymbolType::StructDeclaration + | SymbolType::TypeAlias + | SymbolType::ClassFieldDeclaration + | SymbolType::ImportDeclaration + | SymbolType::VariableDefinition + | SymbolType::FunctionDeclaration + | SymbolType::CommentDefinition + | SymbolType::Unknown => true, + SymbolType::FunctionCall | SymbolType::VariableUsage => false, } } @@ -46,8 +43,13 @@ fn _go_to_parent_until_declaration( if node_option.is_none() { // XXX: legit in Python (assignment at top level, function call at top level) errors.add_error( - "".to_string(), start_node_read.full_range().start_point.row + 1, - format!("go_to_parent: parent decl not found for {:?}", start_node_read.name()).as_str(), + "".to_string(), + start_node_read.full_range().start_point.row + 1, + format!( + "go_to_parent: parent decl not found for {:?}", + start_node_read.name() + ) + .as_str(), ); return Uuid::nil(); } @@ -106,7 +108,7 @@ fn _find_top_level_nodes(pcx: &mut ParseContext) -> &Vec { let mut top_level: Vec = Vec::new(); for (_, node_arc) in pcx.map.iter() { let node = node_arc.read(); - assert!(node.parent_guid().is_some()); // parent always exists for some reason :/ + assert!(node.parent_guid().is_some()); // parent always exists for some reason :/ if _is_declaration(node.symbol_type()) { if !pcx.map.contains_key(&node.parent_guid().unwrap()) { top_level.push(node_arc.clone()); @@ -145,7 +147,8 @@ fn _name_to_usage( if _is_declaration(node.symbol_type()) { look_here.push(node_option.unwrap().clone()); - if let Some(function_declaration) = node.as_any().downcast_ref::() { + if let Some(function_declaration) = node.as_any().downcast_ref::() + { for arg in &function_declaration.args { if 
arg.name == name_of_anything { // eprintln!("{:?} is an argument in a function {:?} => ignore, no path at all, no link", name_of_anything, function_declaration.name()); @@ -163,7 +166,12 @@ fn _name_to_usage( } if let Some(struct_declaration) = node.as_any().downcast_ref::() { - result.targets_for_guesswork.push(format!("?::{}🔎{}::{}", node.language().to_string(), struct_declaration.name(), name_of_anything)); + result.targets_for_guesswork.push(format!( + "?::{}🔎{}::{}", + node.language().to_string(), + struct_declaration.name(), + name_of_anything + )); // Add all children nodes (shallow) for child_guid in struct_declaration.childs_guid() { if let Some(child_node) = pcx.map.get(child_guid) { @@ -190,18 +198,27 @@ fn _name_to_usage( if _is_declaration(node.symbol_type()) { // eprintln!("_name_to_usage {:?} looking in {:?}", name_of_anything, node.name()); if node.name() == name_of_anything { - result.resolved_as = [pcx.file_global_path.clone(), _path_of_node(&pcx.map, Some(node.guid().clone()))].concat().join("::"); + result.resolved_as = [ + pcx.file_global_path.clone(), + _path_of_node(&pcx.map, Some(node.guid().clone())), + ] + .concat() + .join("::"); result.debug_hint = "up".to_string(); } } } if allow_global_ref { - result.targets_for_guesswork.push(format!("?::{}", name_of_anything)); + result + .targets_for_guesswork + .push(format!("?::{}", name_of_anything)); Some(result) } else { // ?::DerivedFrom1::f ?::DerivedFrom2::f f - result.targets_for_guesswork.push(format!("{}", name_of_anything)); + result + .targets_for_guesswork + .push(format!("{}", name_of_anything)); Some(result) } } @@ -254,9 +271,16 @@ fn _typeof( if let Some(first_type) = variable_definition.types().get(0) { let type_name = first_type.name.clone().unwrap_or_default(); if type_name.is_empty() { - errors.add_error("".to_string(), node.full_range().start_point.row + 1, "nameless type for variable definition"); + errors.add_error( + "".to_string(), + node.full_range().start_point.row + 1, + "nameless type for variable definition", + ); } else { - return vec!["?".to_string(), format!("{}🔎{}", node.language().to_string(), type_name)]; + return vec![ + "?".to_string(), + format!("{}🔎{}", node.language().to_string(), type_name), + ]; } } } @@ -269,9 +293,20 @@ fn _typeof( if arg.name == variable_or_param_name { if let Some(arg_type) = &arg.type_ { if arg_type.name.is_none() || arg_type.name.clone().unwrap().is_empty() { - errors.add_error("".to_string(), node.full_range().start_point.row + 1, "nameless type for function argument"); + errors.add_error( + "".to_string(), + node.full_range().start_point.row + 1, + "nameless type for function argument", + ); } else { - return vec!["?".to_string(), format!("{}🔎{}", node.language().to_string(), arg_type.name.clone().unwrap())]; + return vec![ + "?".to_string(), + format!( + "{}🔎{}", + node.language().to_string(), + arg_type.name.clone().unwrap() + ), + ]; } } } @@ -307,15 +342,26 @@ fn _usage_or_typeof_caller_colon_colon_usage( uline, }; let caller_node = caller.read(); - let typeof_caller = _typeof(pcx, caller_node.guid().clone(), caller_node.name().to_string(), errors); + let typeof_caller = _typeof( + pcx, + caller_node.guid().clone(), + caller_node.name().to_string(), + errors, + ); // typeof_caller will be "?" if nothing found, start with "file" if type found in the current file if typeof_caller.first() == Some(&"file".to_string()) { // actually fully resolved! 
- result.resolved_as = [typeof_caller, vec![symbol.name().to_string()]].concat().join("::"); + result.resolved_as = [typeof_caller, vec![symbol.name().to_string()]] + .concat() + .join("::"); result.debug_hint = caller_node.name().to_string(); } else { // not fully resolved - result.targets_for_guesswork.push([typeof_caller, vec![symbol.name().to_string()]].concat().join("::")); + result.targets_for_guesswork.push( + [typeof_caller, vec![symbol.name().to_string()]] + .concat() + .join("::"), + ); result.debug_hint = caller_node.name().to_string(); } Some(result) @@ -326,7 +372,13 @@ fn _usage_or_typeof_caller_colon_colon_usage( // caller is about caller.function_call(1, 2, 3), in this case means just function_call(1, 2, 3) without anything on the left // just look for a name in function's parent and above // - let tmp = _name_to_usage(pcx, uline, symbol.parent_guid().clone(), symbol.name().to_string(), false); + let tmp = _name_to_usage( + pcx, + uline, + symbol.parent_guid().clone(), + symbol.name().to_string(), + false, + ); // eprintln!(" _usage_or_typeof_caller_colon_colon_usage {} _name_to_usage={:?}", symbol.name().to_string(), tmp); tmp } @@ -336,8 +388,7 @@ pub fn parse_anything( cpath: &str, text: &str, errors: &mut AstErrorStats, -) -> Result<(Vec, String), String> -{ +) -> Result<(Vec, String), String> { let path = PathBuf::from(cpath); let (mut parser, language_id) = get_ast_parser_by_filename(&path).map_err(|err| err.message)?; let language = language_id.to_string(); @@ -349,7 +400,10 @@ pub fn parse_anything( let symbols = parser.parse(text, &path); if symbols.len() > TOO_MANY_SYMBOLS_IN_FILE { - return Err(format!("more than {} symbols, generated?", TOO_MANY_SYMBOLS_IN_FILE)); + return Err(format!( + "more than {} symbols, generated?", + TOO_MANY_SYMBOLS_IN_FILE + )); } let symbols2 = symbols.clone(); @@ -366,28 +420,45 @@ pub fn parse_anything( let symbol = symbol.read(); pcx.map.insert(symbol.guid().clone(), symbol_arc_clone); match symbol.symbol_type() { - SymbolType::StructDeclaration | - SymbolType::TypeAlias | - SymbolType::ClassFieldDeclaration | - SymbolType::VariableDefinition | - SymbolType::FunctionDeclaration | - SymbolType::Unknown => { + SymbolType::StructDeclaration + | SymbolType::TypeAlias + | SymbolType::ClassFieldDeclaration + | SymbolType::VariableDefinition + | SymbolType::FunctionDeclaration + | SymbolType::Unknown => { let mut this_is_a_class = "".to_string(); let mut this_class_derived_from = vec![]; let mut usages = vec![]; - if let Some(struct_declaration) = symbol.as_any().downcast_ref::() { + if let Some(struct_declaration) = + symbol.as_any().downcast_ref::() + { this_is_a_class = format!("{}🔎{}", pcx.language, struct_declaration.name()); for base_class in struct_declaration.inherited_types.iter() { let base_class_name = base_class.name.clone().unwrap_or_default(); if base_class_name.is_empty() { - errors.add_error("".to_string(), struct_declaration.full_range().start_point.row + 1, "nameless base class"); + errors.add_error( + "".to_string(), + struct_declaration.full_range().start_point.row + 1, + "nameless base class", + ); continue; } - this_class_derived_from.push(format!("{}🔎{}", pcx.language, base_class_name)); - if let Some(usage) = _name_to_usage(&mut pcx, symbol.full_range().start_point.row + 1, symbol.parent_guid().clone(), base_class_name, true) { + this_class_derived_from + .push(format!("{}🔎{}", pcx.language, base_class_name)); + if let Some(usage) = _name_to_usage( + &mut pcx, + symbol.full_range().start_point.row + 1, + 
symbol.parent_guid().clone(), + base_class_name, + true, + ) { usages.push(usage); } else { - errors.add_error("".to_string(), struct_declaration.full_range().start_point.row + 1, "unable to create base class usage"); + errors.add_error( + "".to_string(), + struct_declaration.full_range().start_point.row + 1, + "unable to create base class usage", + ); } } } @@ -396,14 +467,19 @@ pub fn parse_anything( if let Some(parent_guid) = symbol.parent_guid() { if let Some(parent_symbol) = pcx.map.get(&parent_guid) { let parent_symbol = parent_symbol.read(); - if parent_symbol.as_any().downcast_ref::().is_some() { + if parent_symbol + .as_any() + .downcast_ref::() + .is_some() + { skip_var_because_parent_is_function = true; } } } } if !symbol.name().is_empty() && !skip_var_because_parent_is_function { - let (line1, line2, line_mid) = line12mid_from_ranges(symbol.full_range(), symbol.definition_range()); + let (line1, line2, line_mid) = + line12mid_from_ranges(symbol.full_range(), symbol.definition_range()); let definition = AstDefinition { official_path: _path_of_node(&pcx.map, Some(symbol.guid().clone())), symbol_type: symbol.symbol_type().clone(), @@ -422,14 +498,18 @@ pub fn parse_anything( }; pcx.definitions.insert(symbol.guid().clone(), definition); } else if symbol.name().is_empty() { - errors.add_error("".to_string(), symbol.full_range().start_point.row + 1, "nameless decl"); + errors.add_error( + "".to_string(), + symbol.full_range().start_point.row + 1, + "nameless decl", + ); } } - SymbolType::Module | - SymbolType::CommentDefinition | - SymbolType::ImportDeclaration | - SymbolType::FunctionCall | - SymbolType::VariableUsage => { + SymbolType::Module + | SymbolType::CommentDefinition + | SymbolType::ImportDeclaration + | SymbolType::FunctionCall + | SymbolType::VariableUsage => { // do nothing } } @@ -439,47 +519,67 @@ pub fn parse_anything( let symbol = symbol_arc.read(); // eprintln!("pass2: {:?}", symbol); match symbol.symbol_type() { - SymbolType::StructDeclaration | - SymbolType::Module | - SymbolType::TypeAlias | - SymbolType::ClassFieldDeclaration | - SymbolType::ImportDeclaration | - SymbolType::VariableDefinition | - SymbolType::FunctionDeclaration | - SymbolType::CommentDefinition | - SymbolType::Unknown => { + SymbolType::StructDeclaration + | SymbolType::Module + | SymbolType::TypeAlias + | SymbolType::ClassFieldDeclaration + | SymbolType::ImportDeclaration + | SymbolType::VariableDefinition + | SymbolType::FunctionDeclaration + | SymbolType::CommentDefinition + | SymbolType::Unknown => { continue; } SymbolType::FunctionCall => { - let function_call = symbol.as_any().downcast_ref::().expect("xxx1000"); + let function_call = symbol + .as_any() + .downcast_ref::() + .expect("xxx1000"); let uline = function_call.full_range().start_point.row + 1; if function_call.name().is_empty() { errors.add_error("".to_string(), uline, "nameless call"); continue; } - let usage = _usage_or_typeof_caller_colon_colon_usage(&mut pcx, function_call.get_caller_guid().clone(), uline, function_call, errors); + let usage = _usage_or_typeof_caller_colon_colon_usage( + &mut pcx, + function_call.get_caller_guid().clone(), + uline, + function_call, + errors, + ); // eprintln!("function call name={} usage={:?} debug_hint={:?}", function_call.name(), usage, debug_hint); if usage.is_none() { continue; } - let my_parent = _go_to_parent_until_declaration(&pcx.map, symbol_arc.clone(), errors); + let my_parent = + _go_to_parent_until_declaration(&pcx.map, symbol_arc.clone(), errors); if let Some(my_parent_def) = 
pcx.definitions.get_mut(&my_parent) { my_parent_def.usages.push(usage.unwrap()); } } SymbolType::VariableUsage => { - let variable_usage = symbol.as_any().downcast_ref::().expect("xxx1001"); + let variable_usage = symbol + .as_any() + .downcast_ref::() + .expect("xxx1001"); let uline = variable_usage.full_range().start_point.row + 1; if variable_usage.name().is_empty() { errors.add_error("".to_string(), uline, "nameless variable usage"); continue; } - let usage = _usage_or_typeof_caller_colon_colon_usage(&mut pcx, variable_usage.fields().caller_guid.clone(), uline, variable_usage, errors); + let usage = _usage_or_typeof_caller_colon_colon_usage( + &mut pcx, + variable_usage.fields().caller_guid.clone(), + uline, + variable_usage, + errors, + ); // eprintln!("variable usage name={} usage={:?}", variable_usage.name(), usage); if usage.is_none() { continue; } - let my_parent = _go_to_parent_until_declaration(&pcx.map, symbol_arc.clone(), errors); + let my_parent = + _go_to_parent_until_declaration(&pcx.map, symbol_arc.clone(), errors); if let Some(my_parent_def) = pcx.definitions.get_mut(&my_parent) { my_parent_def.usages.push(usage.unwrap()); } @@ -515,7 +615,8 @@ pub fn filesystem_path_to_double_colon_path(cpath: &str) -> Vec { const ALPHANUM: &[u8] = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; let mut x = 0usize; - let short_alphanum: String = result.iter() + let short_alphanum: String = result + .iter() .map(|&byte| { x += byte as usize; x %= ALPHANUM.len(); @@ -532,8 +633,7 @@ pub fn parse_anything_and_add_file_path( cpath: &str, text: &str, errstats: &mut AstErrorStats, -) -> Result<(Vec, String), String> -{ +) -> Result<(Vec, String), String> { let file_global_path = filesystem_path_to_double_colon_path(cpath); let file_global_path_str = file_global_path.join("::"); let errors_count_before = errstats.errors.len(); @@ -546,10 +646,8 @@ pub fn parse_anything_and_add_file_path( if !definition.official_path.is_empty() && definition.official_path[0] == "root" { definition.official_path.remove(0); } - definition.official_path = [ - file_global_path.clone(), - definition.official_path.clone() - ].concat(); + definition.official_path = + [file_global_path.clone(), definition.official_path.clone()].concat(); for usage in &mut definition.usages { for t in &mut usage.targets_for_guesswork { if t.starts_with("file::") || t.starts_with("root::") { @@ -570,7 +668,6 @@ pub fn parse_anything_and_add_file_path( Ok((definitions, language)) } - #[cfg(test)] mod tests { use super::*; @@ -592,11 +689,25 @@ mod tests { } fn _must_be_no_diff(expected: &str, produced: &str) -> String { - let expected_lines: Vec<_> = expected.lines().map(|line| line.trim()).filter(|line| !line.is_empty()).collect(); - let produced_lines: Vec<_> = produced.lines().map(|line| line.trim()).filter(|line| !line.is_empty()).collect(); + let expected_lines: Vec<_> = expected + .lines() + .map(|line| line.trim()) + .filter(|line| !line.is_empty()) + .collect(); + let produced_lines: Vec<_> = produced + .lines() + .map(|line| line.trim()) + .filter(|line| !line.is_empty()) + .collect(); let mut mistakes = String::new(); - let missing_in_produced: Vec<_> = expected_lines.iter().filter(|line| !produced_lines.contains(line)).collect(); - let missing_in_expected: Vec<_> = produced_lines.iter().filter(|line| !expected_lines.contains(line)).collect(); + let missing_in_produced: Vec<_> = expected_lines + .iter() + .filter(|line| !produced_lines.contains(line)) + .collect(); + let missing_in_expected: Vec<_> = 
produced_lines + .iter() + .filter(|line| !expected_lines.contains(line)) + .collect(); if !missing_in_expected.is_empty() { mistakes.push_str("bad output:\n"); for line in missing_in_expected.iter() { @@ -617,7 +728,8 @@ mod tests { let mut errstats = AstErrorStats::default(); let absfn1 = std::fs::canonicalize(input_file).unwrap(); let text = _read_file(absfn1.to_str().unwrap()); - let (definitions, _language) = parse_anything(absfn1.to_str().unwrap(), &text, &mut errstats).unwrap(); + let (definitions, _language) = + parse_anything(absfn1.to_str().unwrap(), &text, &mut errstats).unwrap(); let mut defs_str = String::new(); for d in definitions.iter() { defs_str.push_str(&format!("{:?}\n", d)); @@ -629,7 +741,10 @@ mod tests { println!("PROBLEMS {:#?}:\n{}/PROBLEMS", absfn1, oops); } for error in errstats.errors { - println!("(E) {}:{} {}", error.err_cpath, error.err_line, error.err_message); + println!( + "(E) {}:{} {}", + error.err_cpath, error.err_line, error.err_message + ); } } @@ -637,7 +752,7 @@ mod tests { fn test_ast_parse_cpp_library() { _run_parse_test( "src/ast/alt_testsuite/cpp_goat_library.h", - "src/ast/alt_testsuite/cpp_goat_library.correct" + "src/ast/alt_testsuite/cpp_goat_library.correct", ); } @@ -645,7 +760,7 @@ mod tests { fn test_ast_parse_cpp_main() { _run_parse_test( "src/ast/alt_testsuite/cpp_goat_main.cpp", - "src/ast/alt_testsuite/cpp_goat_main.correct" + "src/ast/alt_testsuite/cpp_goat_main.correct", ); } @@ -653,8 +768,7 @@ mod tests { fn test_ast_parse_py_library() { _run_parse_test( "src/ast/alt_testsuite/py_goat_library.py", - "src/ast/alt_testsuite/py_goat_library.correct" + "src/ast/alt_testsuite/py_goat_library.correct", ); } } - diff --git a/refact-agent/engine/src/ast/ast_structs.rs b/refact-agent/engine/src/ast/ast_structs.rs index 96d0e5386..896e5b181 100644 --- a/refact-agent/engine/src/ast/ast_structs.rs +++ b/refact-agent/engine/src/ast/ast_structs.rs @@ -5,7 +5,6 @@ use tempfile::TempDir; use tokio::sync::{Notify as ANotify}; pub use crate::ast::treesitter::structs::SymbolType; - #[derive(Serialize, Deserialize, Clone)] pub struct AstUsage { // Linking means trying to match targets_for_guesswork against official_path, the longer @@ -13,21 +12,21 @@ pub struct AstUsage { pub targets_for_guesswork: Vec, // ?::DerivedFrom1::f ?::DerivedFrom2::f ?::f pub resolved_as: String, pub debug_hint: String, - pub uline: usize, // starts from 1, like other line numbers + pub uline: usize, // starts from 1, like other line numbers } #[derive(Serialize, Deserialize)] pub struct AstDefinition { - pub official_path: Vec, // file::namespace::class::method becomes ["file", "namespace", "class", "method"] + pub official_path: Vec, // file::namespace::class::method becomes ["file", "namespace", "class", "method"] pub symbol_type: SymbolType, pub usages: Vec, - pub resolved_type: String, // for type derivation at pass2 or something, not used much now - pub this_is_a_class: String, // cpp🔎Goat + pub resolved_type: String, // for type derivation at pass2 or something, not used much now + pub this_is_a_class: String, // cpp🔎Goat pub this_class_derived_from: Vec, // cpp🔎Animal, cpp🔎CosmicJustice pub cpath: String, - pub decl_line1: usize, // starts from 1, guaranteed > 0 - pub decl_line2: usize, // guaranteed >= line1 - pub body_line1: usize, // use full_line1() full_line2() if not sure + pub decl_line1: usize, // starts from 1, guaranteed > 0 + pub decl_line2: usize, // guaranteed >= line1 + pub body_line1: usize, // use full_line1() full_line2() if not sure pub 
body_line2: usize, } @@ -37,9 +36,16 @@ impl AstDefinition { } pub fn path_drop0(&self) -> String { - if self.official_path.len() > 3 { // new style long path, starts with hex code we don't want users to see - self.official_path.iter().skip(1).cloned().collect::>().join("::") - } else { // there's not much to cut + if self.official_path.len() > 3 { + // new style long path, starts with hex code we don't want users to see + self.official_path + .iter() + .skip(1) + .cloned() + .collect::>() + .join("::") + } else { + // there's not much to cut self.official_path.join("::") } } @@ -85,7 +91,6 @@ pub struct AstCounters { pub counter_docs: i32, } - const TOO_MANY_ERRORS: usize = 1000; pub struct AstError { @@ -126,13 +131,16 @@ impl Default for AstErrorStats { } } - impl fmt::Debug for AstDefinition { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let usages_paths: Vec = self.usages.iter() + let usages_paths: Vec = self + .usages + .iter() .map(|link| format!("{:?}", link)) .collect(); - let derived_from_paths: Vec = self.this_class_derived_from.iter() + let derived_from_paths: Vec = self + .this_class_derived_from + .iter() .map(|link| format!("{:?}", link)) .collect(); @@ -172,7 +180,11 @@ impl fmt::Debug for AstUsage { f, "U{{ {} {} }}", self.debug_hint, - if self.resolved_as.len() > 0 { self.resolved_as.clone() } else { format!("guess {}", self.targets_for_guesswork.join(" ")) } + if self.resolved_as.len() > 0 { + self.resolved_as.clone() + } else { + format!("guess {}", self.targets_for_guesswork.join(" ")) + } ) } } diff --git a/refact-agent/engine/src/ast/chunk_utils.rs b/refact-agent/engine/src/ast/chunk_utils.rs index 569880bf3..7a3fa0283 100644 --- a/refact-agent/engine/src/ast/chunk_utils.rs +++ b/refact-agent/engine/src/ast/chunk_utils.rs @@ -10,14 +10,16 @@ use crate::tokens::count_text_tokens; use crate::tokens::count_text_tokens_with_fallback; use crate::vecdb::vdb_structs::SplitResult; - pub fn official_text_hashing_function(s: &str) -> String { let digest = md5::compute(s); format!("{:x}", digest) } - -fn split_line_if_needed(line: &str, tokenizer: Option>, tokens_limit: usize) -> Vec { +fn split_line_if_needed( + line: &str, + tokenizer: Option>, + tokens_limit: usize, +) -> Vec { if let Some(tokenizer) = tokenizer { tokenizer.encode(line, false).map_or_else( |_| split_without_tokenizer(line, tokens_limit), @@ -30,7 +32,7 @@ fn split_line_if_needed(line: &str, tokenizer: Option>, tokens_li .filter_map(|chunk| tokenizer.decode(chunk, true).ok()) .collect() } - } + }, ) } else { split_without_tokenizer(line, tokens_limit) @@ -41,7 +43,8 @@ fn split_without_tokenizer(line: &str, tokens_limit: usize) -> Vec { if count_text_tokens(None, line).is_ok_and(|tokens| tokens <= tokens_limit) { vec![line.to_string()] } else { - Rope::from_str(line).chars() + Rope::from_str(line) + .chars() .collect::>() .chunks(tokens_limit) .map(|chunk| chunk.iter().collect()) @@ -49,14 +52,15 @@ fn split_without_tokenizer(line: &str, tokens_limit: usize) -> Vec { } } -pub fn get_chunks(text: &String, - file_path: &PathBuf, - symbol_path: &String, - top_bottom_rows: (usize, usize), // case with top comments - tokenizer: Option>, - tokens_limit: usize, - intersection_lines: usize, - use_symbol_range_always: bool, // use for skeleton case +pub fn get_chunks( + text: &String, + file_path: &PathBuf, + symbol_path: &String, + top_bottom_rows: (usize, usize), // case with top comments + tokenizer: Option>, + tokens_limit: usize, + intersection_lines: usize, + use_symbol_range_always: bool, // use 
for skeleton case ) -> Vec { let (top_row, bottom_row) = top_bottom_rows; let mut chunks: Vec = Vec::new(); @@ -64,7 +68,8 @@ pub fn get_chunks(text: &String, let mut current_tok_n = 0; let lines = text.split("\n").collect::>(); - { // try to split chunks from top to bottom + { + // try to split chunks from top to bottom let mut line_idx: usize = 0; let mut previous_start = line_idx; while line_idx < lines.len() { @@ -73,9 +78,19 @@ pub fn get_chunks(text: &String, if !accum.is_empty() && current_tok_n + line_tok_n > tokens_limit { let current_line = accum.iter().map(|(line, _)| line).join("\n"); - let start_line = if use_symbol_range_always { top_row as u64 } else { accum.front().unwrap().1 as u64 }; - let end_line = if use_symbol_range_always { bottom_row as u64 } else { accum.back().unwrap().1 as u64 }; - for chunked_line in split_line_if_needed(¤t_line, tokenizer.clone(), tokens_limit) { + let start_line = if use_symbol_range_always { + top_row as u64 + } else { + accum.front().unwrap().1 as u64 + }; + let end_line = if use_symbol_range_always { + bottom_row as u64 + } else { + accum.back().unwrap().1 as u64 + }; + for chunked_line in + split_line_if_needed(¤t_line, tokenizer.clone(), tokens_limit) + { chunks.push(SplitResult { file_path: file_path.clone(), window_text: chunked_line.clone(), @@ -87,7 +102,8 @@ pub fn get_chunks(text: &String, } accum.clear(); current_tok_n = 0; - line_idx = (previous_start + 1).max((line_idx as i64 - intersection_lines as i64).max(0) as usize); + line_idx = (previous_start + 1) + .max((line_idx as i64 - intersection_lines as i64).max(0) as usize); previous_start = line_idx; } else { current_tok_n += line_tok_n; @@ -107,9 +123,19 @@ pub fn get_chunks(text: &String, let text_orig_tok_n = count_text_tokens_with_fallback(tokenizer.clone(), line); if !accum.is_empty() && current_tok_n + text_orig_tok_n > tokens_limit { let current_line = accum.iter().map(|(line, _)| line).join("\n"); - let start_line = if use_symbol_range_always { top_row as u64 } else { accum.front().unwrap().1 as u64 }; - let end_line = if use_symbol_range_always { bottom_row as u64 } else { accum.back().unwrap().1 as u64 }; - for chunked_line in split_line_if_needed(¤t_line, tokenizer.clone(), tokens_limit) { + let start_line = if use_symbol_range_always { + top_row as u64 + } else { + accum.front().unwrap().1 as u64 + }; + let end_line = if use_symbol_range_always { + bottom_row as u64 + } else { + accum.back().unwrap().1 as u64 + }; + for chunked_line in + split_line_if_needed(¤t_line, tokenizer.clone(), tokens_limit) + { chunks.push(SplitResult { file_path: file_path.clone(), window_text: chunked_line.clone(), @@ -131,8 +157,16 @@ pub fn get_chunks(text: &String, if !accum.is_empty() { let current_line = accum.iter().map(|(line, _)| line).join("\n"); - let start_line = if use_symbol_range_always { top_row as u64 } else { accum.front().unwrap().1 as u64 }; - let end_line = if use_symbol_range_always { bottom_row as u64 } else { accum.back().unwrap().1 as u64 }; + let start_line = if use_symbol_range_always { + top_row as u64 + } else { + accum.front().unwrap().1 as u64 + }; + let end_line = if use_symbol_range_always { + bottom_row as u64 + } else { + accum.back().unwrap().1 as u64 + }; for chunked_line in split_line_if_needed(¤t_line, tokenizer.clone(), tokens_limit) { chunks.push(SplitResult { file_path: file_path.clone(), @@ -145,7 +179,10 @@ pub fn get_chunks(text: &String, } } - chunks.into_iter().filter(|c|!c.window_text.is_empty()).collect() + chunks + .into_iter() + .filter(|c| 
!c.window_text.is_empty()) + .collect() } #[cfg(test)] @@ -180,7 +217,9 @@ mod tests { #[test] fn simple_chunk_test_1_with_128_limit() { - let tokenizer = Some(Arc::new(tokenizers::Tokenizer::from_str(DUMMY_TOKENIZER).unwrap())); + let tokenizer = Some(Arc::new( + tokenizers::Tokenizer::from_str(DUMMY_TOKENIZER).unwrap(), + )); let orig = include_str!("../caps/mod.rs").to_string(); let token_limits = [10, 50, 100, 200, 300]; for &token_limit in &token_limits { @@ -190,17 +229,23 @@ mod tests { &"".to_string(), (0, 10), tokenizer.clone(), - token_limit, 2, false); + token_limit, + 2, + false, + ); let mut not_present: Vec = orig.chars().collect(); let mut result = String::new(); for chunk in chunks.iter() { - result.push_str(&format!("\n\n------- {:?} {}-{} -------\n", chunk.symbol_path, chunk.start_line, chunk.end_line)); + result.push_str(&format!( + "\n\n------- {:?} {}-{} -------\n", + chunk.symbol_path, chunk.start_line, chunk.end_line + )); result.push_str(&chunk.window_text); result.push_str("\n"); let mut start_pos = 0; while let Some(found_pos) = orig[start_pos..].find(&chunk.window_text) { let i = start_pos + found_pos; - for j in i .. i + chunk.window_text.len() { + for j in i..i + chunk.window_text.len() { not_present[j] = ' '; } start_pos = i + chunk.window_text.len(); @@ -208,8 +253,12 @@ mod tests { } let not_present_str = not_present.iter().collect::(); println!("====\n{}\n====", result); - assert!(not_present_str.trim().is_empty(), "token_limit={} anything non space means it's missing from vecdb {:?}", token_limit, not_present_str); + assert!( + not_present_str.trim().is_empty(), + "token_limit={} anything non space means it's missing from vecdb {:?}", + token_limit, + not_present_str + ); } } - } diff --git a/refact-agent/engine/src/ast/file_splitter.rs b/refact-agent/engine/src/ast/file_splitter.rs index ab5e28a44..c03dedcaa 100644 --- a/refact-agent/engine/src/ast/file_splitter.rs +++ b/refact-agent/engine/src/ast/file_splitter.rs @@ -14,13 +14,11 @@ use crate::ast::treesitter::file_ast_markup::FileASTMarkup; pub(crate) const LINES_OVERLAP: usize = 3; - pub struct AstBasedFileSplitter { fallback_file_splitter: crate::vecdb::vdb_file_splitter::FileSplitter, } impl AstBasedFileSplitter { - pub fn new(window_size: usize) -> Self { Self { fallback_file_splitter: crate::vecdb::vdb_file_splitter::FileSplitter::new(window_size), @@ -43,7 +41,10 @@ impl AstBasedFileSplitter { Ok(parser) => parser, Err(_e) => { // tracing::info!("cannot find a parser for {:?}, using simple file splitter: {}", crate::nicer_logs::last_n_chars(&path.display().to_string(), 30), e.message); - return self.fallback_file_splitter.vectorization_split(&doc, tokenizer.clone(), tokens_limit, gcx.clone()).await; + return self + .fallback_file_splitter + .vectorization_split(&doc, tokenizer.clone(), tokens_limit, gcx.clone()) + .await; } }; @@ -58,51 +59,87 @@ impl AstBasedFileSplitter { }); } - let ast_markup: FileASTMarkup = match crate::ast::lowlevel_file_markup(&doc, &symbols_struct) { - Ok(x) => x, - Err(e) => { - tracing::info!("lowlevel_file_markup failed for {:?}, using simple file splitter: {}", crate::nicer_logs::last_n_chars(&path.display().to_string(), 30), e); - return self.fallback_file_splitter.vectorization_split(&doc, tokenizer.clone(), tokens_limit, gcx.clone()).await; - } - }; + let ast_markup: FileASTMarkup = + match crate::ast::lowlevel_file_markup(&doc, &symbols_struct) { + Ok(x) => x, + Err(e) => { + tracing::info!( + "lowlevel_file_markup failed for {:?}, using simple file splitter: 
{}", + crate::nicer_logs::last_n_chars(&path.display().to_string(), 30), + e + ); + return self + .fallback_file_splitter + .vectorization_split(&doc, tokenizer.clone(), tokens_limit, gcx.clone()) + .await; + } + }; - let guid_to_info: HashMap = ast_markup.symbols_sorted_by_path_len.iter().map(|s| (s.guid.clone(), s)).collect(); - let guids: Vec<_> = guid_to_info.iter() + let guid_to_info: HashMap = ast_markup + .symbols_sorted_by_path_len + .iter() + .map(|s| (s.guid.clone(), s)) + .collect(); + let guids: Vec<_> = guid_to_info + .iter() .sorted_by(|a, b| a.1.full_range.start_byte.cmp(&b.1.full_range.start_byte)) - .map(|(s, _)| s.clone()).collect(); + .map(|(s, _)| s.clone()) + .collect(); let mut chunks: Vec = Vec::new(); let mut unused_symbols_cluster_accumulator: Vec<&SymbolInformation> = Default::default(); - let flush_accumulator = | - unused_symbols_cluster_accumulator_: &mut Vec<&SymbolInformation>, - chunks_: &mut Vec, - | { - if !unused_symbols_cluster_accumulator_.is_empty() { - let top_row = unused_symbols_cluster_accumulator_.first().unwrap().full_range.start_point.row; - let bottom_row = unused_symbols_cluster_accumulator_.last().unwrap().full_range.end_point.row; - let content = doc_lines[top_row..bottom_row + 1].join("\n"); - let chunks__ = crate::ast::chunk_utils::get_chunks(&content, &path, &"".to_string(), - (top_row, bottom_row), - tokenizer.clone(), tokens_limit, LINES_OVERLAP, false); - chunks_.extend(chunks__); - unused_symbols_cluster_accumulator_.clear(); - } - }; - + let flush_accumulator = + |unused_symbols_cluster_accumulator_: &mut Vec<&SymbolInformation>, + chunks_: &mut Vec| { + if !unused_symbols_cluster_accumulator_.is_empty() { + let top_row = unused_symbols_cluster_accumulator_ + .first() + .unwrap() + .full_range + .start_point + .row; + let bottom_row = unused_symbols_cluster_accumulator_ + .last() + .unwrap() + .full_range + .end_point + .row; + let content = doc_lines[top_row..bottom_row + 1].join("\n"); + let chunks__ = crate::ast::chunk_utils::get_chunks( + &content, + &path, + &"".to_string(), + (top_row, bottom_row), + tokenizer.clone(), + tokens_limit, + LINES_OVERLAP, + false, + ); + chunks_.extend(chunks__); + unused_symbols_cluster_accumulator_.clear(); + } + }; for guid in &guids { let symbol = guid_to_info.get(&guid).unwrap(); let need_in_vecdb_at_all = match symbol.symbol_type { - SymbolType::StructDeclaration | SymbolType::FunctionDeclaration | - SymbolType::TypeAlias | SymbolType::ClassFieldDeclaration => true, + SymbolType::StructDeclaration + | SymbolType::FunctionDeclaration + | SymbolType::TypeAlias + | SymbolType::ClassFieldDeclaration => true, _ => false, }; if !need_in_vecdb_at_all { let mut is_flushed = false; let mut parent_guid = &symbol.parent_guid; while let Some(_parent_sym) = guid_to_info.get(parent_guid) { - if vec![SymbolType::StructDeclaration, SymbolType::FunctionDeclaration].contains(&_parent_sym.symbol_type) { + if vec![ + SymbolType::StructDeclaration, + SymbolType::FunctionDeclaration, + ] + .contains(&_parent_sym.symbol_type) + { flush_accumulator(&mut unused_symbols_cluster_accumulator, &mut chunks); is_flushed = true; break; @@ -120,20 +157,47 @@ impl AstBasedFileSplitter { if symbol.symbol_type == SymbolType::StructDeclaration { if let Some(children) = guid_to_children.get(&symbol.guid) { if !children.is_empty() { - let skeleton_line = formatter.make_skeleton(&symbol, &doc_text, &guid_to_children, &guid_to_info); - let chunks_ = crate::ast::chunk_utils::get_chunks(&skeleton_line, &symbol.file_path, - 
&symbol.symbol_path, - (symbol.full_range.start_point.row, symbol.full_range.end_point.row), - tokenizer.clone(), tokens_limit, LINES_OVERLAP, true); + let skeleton_line = formatter.make_skeleton( + &symbol, + &doc_text, + &guid_to_children, + &guid_to_info, + ); + let chunks_ = crate::ast::chunk_utils::get_chunks( + &skeleton_line, + &symbol.file_path, + &symbol.symbol_path, + ( + symbol.full_range.start_point.row, + symbol.full_range.end_point.row, + ), + tokenizer.clone(), + tokens_limit, + LINES_OVERLAP, + true, + ); chunks.extend(chunks_); } } } - let (declaration, top_bottom_rows) = formatter.get_declaration_with_comments(&symbol, &doc_text, &guid_to_children, &guid_to_info); + let (declaration, top_bottom_rows) = formatter.get_declaration_with_comments( + &symbol, + &doc_text, + &guid_to_children, + &guid_to_info, + ); if !declaration.is_empty() { - let chunks_ = crate::ast::chunk_utils::get_chunks(&declaration, &symbol.file_path, - &symbol.symbol_path, top_bottom_rows, tokenizer.clone(), tokens_limit, LINES_OVERLAP, true); + let chunks_ = crate::ast::chunk_utils::get_chunks( + &declaration, + &symbol.file_path, + &symbol.symbol_path, + top_bottom_rows, + tokenizer.clone(), + tokens_limit, + LINES_OVERLAP, + true, + ); chunks.extend(chunks_); } } diff --git a/refact-agent/engine/src/ast/mod.rs b/refact-agent/engine/src/ast/mod.rs index 5acd16348..ae68a31af 100644 --- a/refact-agent/engine/src/ast/mod.rs +++ b/refact-agent/engine/src/ast/mod.rs @@ -8,16 +8,16 @@ use crate::ast::treesitter::file_ast_markup::FileASTMarkup; pub mod treesitter; -pub mod ast_structs; -pub mod ast_parse_anything; -pub mod ast_indexer_thread; pub mod ast_db; +pub mod ast_indexer_thread; +pub mod ast_parse_anything; +pub mod ast_structs; -pub mod file_splitter; pub mod chunk_utils; +pub mod file_splitter; -pub mod parse_python; pub mod parse_common; +pub mod parse_python; pub fn lowlevel_file_markup( doc: &Document, @@ -25,17 +25,25 @@ pub fn lowlevel_file_markup( ) -> Result { let t0 = std::time::Instant::now(); assert!(doc.doc_text.is_some()); - let mut symbols4export: Vec>> = symbols.iter().map(|s| { - Arc::new(RefCell::new(s.clone())) - }).collect(); - let guid_to_symbol: HashMap>> = symbols4export.iter().map( - |s| (s.borrow().guid.clone(), s.clone()) - ).collect(); - fn recursive_path_of_guid(guid_to_symbol: &HashMap>>, guid: &Uuid) -> String - { + let mut symbols4export: Vec>> = symbols + .iter() + .map(|s| Arc::new(RefCell::new(s.clone()))) + .collect(); + let guid_to_symbol: HashMap>> = symbols4export + .iter() + .map(|s| (s.borrow().guid.clone(), s.clone())) + .collect(); + fn recursive_path_of_guid( + guid_to_symbol: &HashMap>>, + guid: &Uuid, + ) -> String { return match guid_to_symbol.get(guid) { Some(x) => { - let pname = if !x.borrow().name.is_empty() { x.borrow().name.clone() } else { x.borrow().guid.to_string()[..8].to_string() }; + let pname = if !x.borrow().name.is_empty() { + x.borrow().name.clone() + } else { + x.borrow().guid.to_string()[..8].to_string() + }; let pp = recursive_path_of_guid(&guid_to_symbol, &x.borrow().parent_guid); format!("{}::{}", pp, pname) } @@ -52,19 +60,21 @@ pub fn lowlevel_file_markup( } // longer symbol path at the bottom => parent always higher than children symbols4export.sort_by(|a, b| { - a.borrow().symbol_path.len().cmp(&b.borrow().symbol_path.len()) + a.borrow() + .symbol_path + .len() + .cmp(&b.borrow().symbol_path.len()) }); let x = FileASTMarkup { // file_path: doc.doc_path.clone(), // file_content: doc.doc_text.as_ref().unwrap().to_string(), - 
symbols_sorted_by_path_len: symbols4export.iter().map(|s| { - s.borrow().clone() - }).collect(), + symbols_sorted_by_path_len: symbols4export.iter().map(|s| s.borrow().clone()).collect(), }; - tracing::info!("file_markup {:>4} symbols in {:.3}ms for {}", + tracing::info!( + "file_markup {:>4} symbols in {:.3}ms for {}", x.symbols_sorted_by_path_len.len(), t0.elapsed().as_secs_f32(), - crate::nicer_logs::last_n_chars(&doc.doc_path.to_string_lossy().to_string(), - 30)); + crate::nicer_logs::last_n_chars(&doc.doc_path.to_string_lossy().to_string(), 30) + ); Ok(x) } diff --git a/refact-agent/engine/src/ast/parse_common.rs b/refact-agent/engine/src/ast/parse_common.rs index 0bb8f490d..4bcb0aa01 100644 --- a/refact-agent/engine/src/ast/parse_common.rs +++ b/refact-agent/engine/src/ast/parse_common.rs @@ -4,11 +4,10 @@ use tree_sitter::{Node, Parser, Range}; use crate::ast::ast_structs::{AstDefinition, AstUsage, AstErrorStats}; - #[derive(Debug)] pub struct Thing { #[allow(dead_code)] - pub tline: usize, // only needed for printing in this file + pub tline: usize, // only needed for printing in this file pub public: bool, pub thing_kind: char, pub type_resolved: String, @@ -38,7 +37,9 @@ pub struct ContextAnyParser { impl ContextAnyParser { pub fn error_report(&mut self, node: &Node, msg: String) -> String { let line = node.range().start_point.row + 1; - let mut node_text = self.code[node.byte_range()].to_string().replace("\n", "\\n"); + let mut node_text = self.code[node.byte_range()] + .to_string() + .replace("\n", "\\n"); if node_text.len() > 50 { node_text = node_text.chars().take(50).collect(); node_text.push_str("..."); @@ -46,8 +47,13 @@ impl ContextAnyParser { self.errs.add_error( "".to_string(), line, - format!("{msg}: {:?} in {node_text}", node.kind()).as_str()); - return format!("line {}: {msg} {}", line, self.recursive_print_with_red_brackets(node)); + format!("{msg}: {:?} in {node_text}", node.kind()).as_str(), + ); + return format!( + "line {}: {msg} {}", + line, + self.recursive_print_with_red_brackets(node) + ); } pub fn recursive_print_with_red_brackets(&self, node: &Node) -> String { @@ -58,9 +64,10 @@ impl ContextAnyParser { let mut result = String::new(); let color_code = if rec >= 1 { "\x1b[90m" } else { "\x1b[31m" }; match node.kind() { - "from" | "class" | "import" | "def" | "if" | "for" | ":" | "," | "=" | "." | "(" | ")" | "[" | "]" | "->" => { + "from" | "class" | "import" | "def" | "if" | "for" | ":" | "," | "=" | "." 
| "(" + | ")" | "[" | "]" | "->" => { result.push_str(&self.code[node.byte_range()]); - }, + } _ => { result.push_str(&format!("{}{}[\x1b[0m", color_code, node.kind())); for i in 0..node.child_count() { @@ -71,7 +78,8 @@ impl ContextAnyParser { } else if rec == 0 { result.push_str(&format!("\x1b[35mnaf\x1b[0m")); } - result.push_str(&self._recursive_print_with_red_brackets_helper(&child, rec + 1)); + result + .push_str(&self._recursive_print_with_red_brackets_helper(&child, rec + 1)); } if node.child_count() == 0 { result.push_str(&self.code[node.byte_range()]); @@ -83,7 +91,7 @@ impl ContextAnyParser { } pub fn indent(&self) -> String { - return " ".repeat(self.reclevel*4); + return " ".repeat(self.reclevel * 4); } pub fn indented_println(&self, args: std::fmt::Arguments) { @@ -94,7 +102,13 @@ impl ContextAnyParser { pub fn dump(&self) { println!("\n -- things -- "); for (key, thing) in self.things.iter() { - println!("{:<40} {} {:<40} {}", key, thing.thing_kind, thing.type_resolved, if thing.public { "pub" } else { "" } ); + println!( + "{:<40} {} {:<40} {}", + key, + thing.thing_kind, + thing.type_resolved, + if thing.public { "pub" } else { "" } + ); } println!(" -- /things --\n"); @@ -134,7 +148,10 @@ impl ContextAnyParser { usages_on_line.push(format!("{:?}", usage)); } } - let indent = line.chars().take_while(|c| c.is_whitespace()).collect::(); + let indent = line + .chars() + .take_while(|c| c.is_whitespace()) + .collect::(); for err in &self.errs.errors { if err.err_line == i + 1 { r.push_str(format!("\n{indent}{comment} ERROR {}", err.err_message).as_str()); @@ -146,11 +163,19 @@ impl ContextAnyParser { if thing.thing_kind == 'f' { key_last += "()"; } - r.push_str(format!("\n{indent}{comment} {} {} {}", thing.thing_kind, key_last, thing.type_resolved).as_str()); + r.push_str( + format!( + "\n{indent}{comment} {} {} {}", + thing.thing_kind, key_last, thing.type_resolved + ) + .as_str(), + ); } } if !usages_on_line.is_empty() { - r.push_str(format!("\n{}{} {}", indent, comment, usages_on_line.join(" ")).as_str()); + r.push_str( + format!("\n{}{} {}", indent, comment, usages_on_line.join(" ")).as_str(), + ); } r.push('\n'); r.push_str(line); @@ -158,7 +183,8 @@ impl ContextAnyParser { r } - pub fn export_defs(&mut self, cpath: &str) -> Vec { // self.defs becomes empty after this operation + pub fn export_defs(&mut self, cpath: &str) -> Vec { + // self.defs becomes empty after this operation for (def_key, def) in &mut self.defs { let def_offpath = def.official_path.join("::"); assert!(*def_key == def_offpath || format!("{}::", *def_key) == def_offpath); @@ -167,7 +193,11 @@ impl ContextAnyParser { } for (usage_at, usage) in &self.usages { // println!("usage_at {} {:?} usage.resolved_as={:?}", usage_at, usage, usage.resolved_as); - assert!(usage.resolved_as.is_empty() || usage.resolved_as.starts_with("root::") || usage.resolved_as.starts_with("?::")); + assert!( + usage.resolved_as.is_empty() + || usage.resolved_as.starts_with("root::") + || usage.resolved_as.starts_with("?::") + ); let mut atv = usage_at.split("::").collect::>(); let mut found_home = false; while !atv.is_empty() { @@ -183,7 +213,7 @@ impl ContextAnyParser { self.errs.add_error( "".to_string(), usage.uline + 1, - format!("cannot find parent for {}", usage_at).as_str() + format!("cannot find parent for {}", usage_at).as_str(), ); } } @@ -193,8 +223,7 @@ impl ContextAnyParser { } } -pub fn line12mid_from_ranges(full_range: &Range, body_range: &Range) -> (usize, usize, usize) -{ +pub fn line12mid_from_ranges(full_range: 
&Range, body_range: &Range) -> (usize, usize, usize) { let line1: usize = full_range.start_point.row; let mut line_mid: usize = full_range.end_point.row; let line2: usize = full_range.end_point.row; @@ -206,7 +235,6 @@ pub fn line12mid_from_ranges(full_range: &Range, body_range: &Range) -> (usize, (line1, line2, line_mid) } - // ----------------------------------------------------------- // pub fn any_child_of_type_recursive<'a>(node: Node<'a>, of_type: &str) -> Option> @@ -222,9 +250,8 @@ pub fn line12mid_from_ranges(full_range: &Range, body_range: &Range) -> (usize, // None // } -pub fn any_child_of_type<'a>(node: Node<'a>, of_type: &str) -> Option> -{ - for i in 0 .. node.child_count() { +pub fn any_child_of_type<'a>(node: Node<'a>, of_type: &str) -> Option> { + for i in 0..node.child_count() { let child = node.child(i).unwrap(); if child.kind() == of_type { return Some(child); @@ -233,27 +260,25 @@ pub fn any_child_of_type<'a>(node: Node<'a>, of_type: &str) -> Option> None } -pub fn type_call(t: String, _arg_types: String) -> String -{ +pub fn type_call(t: String, _arg_types: String) -> String { if t.starts_with("ERR/") { return t; } // my_function() t="!MyReturnType" => "MyReturnType" if t.starts_with("!") { - return t[1 ..].to_string(); + return t[1..].to_string(); } return "?".to_string(); } -pub fn type_deindex(t: String) -> String -{ +pub fn type_deindex(t: String) -> String { if t.starts_with("ERR/") { return t; } // Used in this scenario: for x in my_list // t="[MyType]" => "MyType" if t.starts_with("[") && t.ends_with("]") { - return t[1 .. t.len()-1].to_string(); + return t[1..t.len() - 1].to_string(); } // can't do anything for () return "".to_string(); @@ -269,23 +294,23 @@ pub fn type_zerolevel_comma_split(t: &str) -> Vec { '[' => { level_brackets1 += 1; current.push(c); - }, + } ']' => { level_brackets1 -= 1; current.push(c); - }, + } '(' => { level_brackets2 += 1; current.push(c); - }, + } ')' => { level_brackets2 -= 1; current.push(c); - }, + } ',' if level_brackets1 == 0 && level_brackets2 == 0 => { parts.push(current.to_string()); current = String::new(); - }, + } _ => { current.push(c); } @@ -295,15 +320,14 @@ pub fn type_zerolevel_comma_split(t: &str) -> Vec { parts } -pub fn type_deindex_n(t: String, n: usize) -> String -{ +pub fn type_deindex_n(t: String, n: usize) -> String { if t.starts_with("ERR/") { return t; } // Used in this scenario: _, _ = my_value // t="[MyClass1,[int,int],MyClass2]" => n==0 MyClass1 n==1 [int,int] n==2 MyClass2 if t.starts_with("(") && t.ends_with(")") { - let no_square = t[1 .. 
t.len()-1].to_string(); + let no_square = t[1..t.len() - 1].to_string(); let parts = type_zerolevel_comma_split(&no_square); if n < parts.len() { return parts[n].to_string(); diff --git a/refact-agent/engine/src/ast/parse_python.rs b/refact-agent/engine/src/ast/parse_python.rs index 173ae096a..8ebe9244c 100644 --- a/refact-agent/engine/src/ast/parse_python.rs +++ b/refact-agent/engine/src/ast/parse_python.rs @@ -3,7 +3,10 @@ use tree_sitter::{Node, Parser}; use crate::ast::ast_structs::{AstDefinition, AstUsage, AstErrorStats}; use crate::ast::treesitter::structs::SymbolType; -use crate::ast::parse_common::{ContextAnyParser, Thing, any_child_of_type, type_deindex, type_deindex_n, type_call, type_zerolevel_comma_split}; +use crate::ast::parse_common::{ + ContextAnyParser, Thing, any_child_of_type, type_deindex, type_deindex_n, type_call, + type_zerolevel_comma_split, +}; const DEBUG: bool = false; @@ -12,7 +15,6 @@ const DEBUG: bool = false; // - type aliases // - star imports - pub struct ContextPy { pub ap: ContextAnyParser, } @@ -42,16 +44,20 @@ fn py_trivial(potential_usage: &str) -> Option { "?::float" | "float" => Some("float".to_string()), "?::bool" | "bool" => Some("bool".to_string()), "?::str" | "str" => Some("str".to_string()), - "Any" => { Some("*".to_string()) }, - "__name__" => { Some("str".to_string()) }, - "range" => { Some("![int]".to_string()) }, + "Any" => Some("*".to_string()), + "__name__" => Some("str".to_string()), + "range" => Some("![int]".to_string()), // "print" => { Some("!void".to_string()) }, _ => None, } } -fn py_simple_resolve(cx: &mut ContextPy, path: &Vec, look_for: &String, uline: usize) -> AstUsage -{ +fn py_simple_resolve( + cx: &mut ContextPy, + path: &Vec, + look_for: &String, + uline: usize, +) -> AstUsage { if let Some(t) = py_trivial(look_for) { return AstUsage { resolved_as: t, @@ -92,17 +98,36 @@ fn py_simple_resolve(cx: &mut ContextPy, path: &Vec, look_for: &String, }; } -fn py_add_a_thing<'a>(cx: &mut ContextPy, thing_path: &String, thing_kind: char, type_new: String, node: &Node<'a>) -> (bool, String) -{ +fn py_add_a_thing<'a>( + cx: &mut ContextPy, + thing_path: &String, + thing_kind: char, + type_new: String, + node: &Node<'a>, +) -> (bool, String) { if let Some(thing_exists) = cx.ap.things.get(thing_path) { if thing_exists.thing_kind != thing_kind { - let msg = cx.ap.error_report(node, format!("py_add_a_thing both {:?} and {:?} exist", thing_exists.thing_kind, thing_kind)); + let msg = cx.ap.error_report( + node, + format!( + "py_add_a_thing both {:?} and {:?} exist", + thing_exists.thing_kind, thing_kind + ), + ); debug!(cx, "{}", msg); return (false, type_new.clone()); } - let good_idea_to_write = type_problems(&thing_exists.type_resolved) > type_problems(&type_new); + let good_idea_to_write = + type_problems(&thing_exists.type_resolved) > type_problems(&type_new); if good_idea_to_write { - debug!(cx, "TYPE UPDATE {thing_kind} {thing_path} TYPE {} problems={:?} => {} problems={:?}", thing_exists.type_resolved, type_problems(&thing_exists.type_resolved), type_new, type_problems(&type_new)); + debug!( + cx, + "TYPE UPDATE {thing_kind} {thing_path} TYPE {} problems={:?} => {} problems={:?}", + thing_exists.type_resolved, + type_problems(&thing_exists.type_resolved), + type_new, + type_problems(&type_new) + ); cx.ap.resolved_anything = true; } else { return (false, thing_exists.type_resolved.clone()); @@ -110,12 +135,15 @@ fn py_add_a_thing<'a>(cx: &mut ContextPy, thing_path: &String, thing_kind: char, } else { debug!(cx, "ADD {thing_kind} 
{thing_path} {}", type_new); } - cx.ap.things.insert(thing_path.clone(), Thing { - tline: node.range().start_point.row, - public: py_is_public(cx, thing_path), - thing_kind, - type_resolved: type_new.clone(), - }); + cx.ap.things.insert( + thing_path.clone(), + Thing { + tline: node.range().start_point.row, + public: py_is_public(cx, thing_path), + thing_kind, + type_resolved: type_new.clone(), + }, + ); return (true, type_new); } @@ -126,63 +154,95 @@ fn py_is_public(cx: &ContextPy, path_str: &String) -> bool { // return false; // } // } - for i in 1 .. path.len() { - let parent_path = path[0 .. i].join("::"); + for i in 1..path.len() { + let parent_path = path[0..i].join("::"); if let Some(parent_thing) = cx.ap.things.get(&parent_path) { match parent_thing.thing_kind { - 's' => { return parent_thing.public; }, - 'f' => { return false; }, - _ => { }, + 's' => { + return parent_thing.public; + } + 'f' => { + return false; + } + _ => {} } } } true } -fn py_import_save<'a>(cx: &mut ContextPy, path: &Vec, dotted_from: String, import_what: String, import_as: String) -{ +fn py_import_save<'a>( + cx: &mut ContextPy, + path: &Vec, + dotted_from: String, + import_what: String, + import_as: String, +) { let save_as = format!("{}::{}", path.join("::"), import_as); - let mut p = dotted_from.split(".").map(|x| { String::from(x.trim()) }).filter(|x| { !x.is_empty() }).collect::>(); + let mut p = dotted_from + .split(".") + .map(|x| String::from(x.trim())) + .filter(|x| !x.is_empty()) + .collect::>(); p.push(import_what); p.insert(0, "?".to_string()); cx.ap.alias.insert(save_as, p.join("::")); } -fn py_import<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) -{ +fn py_import<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) { let mut dotted_from = String::new(); let mut just_do_it = false; let mut from_clause = false; - for i in 0 .. 
node.child_count() { + for i in 0..node.child_count() { let child = node.child(i).unwrap(); let child_text = cx.ap.code[child.byte_range()].to_string(); match child.kind() { - "import" => { just_do_it = true; }, - "from" => { from_clause = true; }, + "import" => { + just_do_it = true; + } + "from" => { + from_clause = true; + } "dotted_name" => { if just_do_it { - py_import_save(cx, path, dotted_from.clone(), child_text.clone(), child_text.clone()); + py_import_save( + cx, + path, + dotted_from.clone(), + child_text.clone(), + child_text.clone(), + ); } else if from_clause { dotted_from = child_text.clone(); } - }, + } "aliased_import" => { let mut import_what = String::new(); for i in 0..child.child_count() { let subch = child.child(i).unwrap(); let subch_text = cx.ap.code[subch.byte_range()].to_string(); match subch.kind() { - "dotted_name" => { import_what = subch_text; }, - "as" => { }, - "identifier" => { py_import_save(cx, path, dotted_from.clone(), import_what.clone(), subch_text); }, + "dotted_name" => { + import_what = subch_text; + } + "as" => {} + "identifier" => { + py_import_save( + cx, + path, + dotted_from.clone(), + import_what.clone(), + subch_text, + ); + } _ => { let msg = cx.ap.error_report(&child, format!("aliased_import syntax")); debug!(cx, "{}", msg); - }, + } } } - }, - "," => {}, + } + "," => {} _ => { let msg = cx.ap.error_report(&child, format!("import syntax")); debug!(cx, "{}", msg); @@ -191,8 +251,12 @@ fn py_import<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) } } -fn py_resolve_dotted_creating_usages<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec, allow_creation: bool) -> Option -{ +fn py_resolve_dotted_creating_usages<'a>( + cx: &mut ContextPy, + node: &Node<'a>, + path: &Vec, + allow_creation: bool, +) -> Option { let node_text = cx.ap.code[node.byte_range()].to_string(); // debug!(cx, "DOTTED {}", cx.ap.recursive_print_with_red_brackets(&node)); match node.kind() { @@ -211,7 +275,7 @@ fn py_resolve_dotted_creating_usages<'a>(cx: &mut ContextPy, node: &Node<'a>, pa cx.ap.usages.push((path.join("::"), u.clone())); } return Some(u); - }, + } "attribute" => { let object = node.child_by_field_name("object").unwrap(); let attrib = node.child_by_field_name("attribute").unwrap(); @@ -239,49 +303,56 @@ fn py_resolve_dotted_creating_usages<'a>(cx: &mut ContextPy, node: &Node<'a>, pa u.targets_for_guesswork.push(format!("?::{}", attrib_text)); cx.ap.usages.push((path.join("::"), u.clone())); return Some(u); - }, + } _ => { - let msg = cx.ap.error_report(node, format!("py_resolve_dotted_creating_usages syntax")); + let msg = cx + .ap + .error_report(node, format!("py_resolve_dotted_creating_usages syntax")); debug!(cx, "{}", msg); } } None } -fn py_lhs_tuple<'a>(cx: &mut ContextPy, left: &Node<'a>, type_node: Option>, path: &Vec) -> (Vec<(Node<'a>, String)>, bool) -{ +fn py_lhs_tuple<'a>( + cx: &mut ContextPy, + left: &Node<'a>, + type_node: Option>, + path: &Vec, +) -> (Vec<(Node<'a>, String)>, bool) { let mut lhs_tuple: Vec<(Node, String)> = Vec::new(); let mut is_list = false; match left.kind() { "pattern_list" | "tuple_pattern" => { is_list = true; - for j in 0 .. 
left.child_count() { + for j in 0..left.child_count() { let child = left.child(j).unwrap(); match child.kind() { "identifier" | "attribute" => { lhs_tuple.push((child, "?".to_string())); - }, - "," | "(" | ")" => { }, + } + "," | "(" | ")" => {} _ => { - let msg = cx.ap.error_report(&child, format!("py_lhs_tuple list syntax")); + let msg = cx + .ap + .error_report(&child, format!("py_lhs_tuple list syntax")); debug!(cx, "{}", msg); } } } - }, + } "identifier" | "attribute" => { lhs_tuple.push((*left, py_type_generic(cx, type_node, path, 0))); - }, + } _ => { let msg = cx.ap.error_report(left, format!("py_lhs_tuple syntax")); debug!(cx, "{}", msg); - }, + } } (lhs_tuple, is_list) } -fn py_assignment<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec, is_for_loop: bool) -{ +fn py_assignment<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec, is_for_loop: bool) { let left_node = node.child_by_field_name("left"); let right_node = node.child_by_field_name("right"); let mut rhs_type = py_type_of_expr_creating_usages(cx, right_node, path); @@ -291,66 +362,103 @@ fn py_assignment<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec, is if left_node.is_none() { return; } - let (lhs_tuple, is_list) = py_lhs_tuple(cx, &left_node.unwrap(), node.child_by_field_name("type"), path); - for n in 0 .. lhs_tuple.len() { + let (lhs_tuple, is_list) = py_lhs_tuple( + cx, + &left_node.unwrap(), + node.child_by_field_name("type"), + path, + ); + for n in 0..lhs_tuple.len() { let (lhs_lvalue, lvalue_type) = &lhs_tuple[n]; if is_list { - py_var_add(cx, lhs_lvalue, lvalue_type.clone(), type_deindex_n(rhs_type.clone(), n), path); + py_var_add( + cx, + lhs_lvalue, + lvalue_type.clone(), + type_deindex_n(rhs_type.clone(), n), + path, + ); } else { py_var_add(cx, lhs_lvalue, lvalue_type.clone(), rhs_type.clone(), path); } } } -fn py_var_add<'a>(cx: &mut ContextPy, lhs_lvalue: &Node<'a>, lvalue_type: String, rhs_type: String, path: &Vec) -{ - let lvalue_usage = if let Some(u) = py_resolve_dotted_creating_usages(cx, lhs_lvalue, path, true) { - u - } else { - let msg = cx.ap.error_report(lhs_lvalue, format!("py_var_add cannot form lvalue")); - debug!(cx, "{}", msg); - return; - }; +fn py_var_add<'a>( + cx: &mut ContextPy, + lhs_lvalue: &Node<'a>, + lvalue_type: String, + rhs_type: String, + path: &Vec, +) { + let lvalue_usage = + if let Some(u) = py_resolve_dotted_creating_usages(cx, lhs_lvalue, path, true) { + u + } else { + let msg = cx + .ap + .error_report(lhs_lvalue, format!("py_var_add cannot form lvalue")); + debug!(cx, "{}", msg); + return; + }; let lvalue_path; - if lvalue_usage.targets_for_guesswork.is_empty() { // no guessing, exact location + if lvalue_usage.targets_for_guesswork.is_empty() { + // no guessing, exact location lvalue_path = lvalue_usage.resolved_as.clone(); } else { // typical for creating things in a different file, or for example a.b.c = 5 when b doesn't exit - let msg = cx.ap.error_report(lhs_lvalue, format!("py_var_add cannot create")); + let msg = cx + .ap + .error_report(lhs_lvalue, format!("py_var_add cannot create")); debug!(cx, "{}", msg); return; } - let potential_new_type = if type_problems(&lvalue_type) > type_problems(&rhs_type) { rhs_type.clone() } else { lvalue_type.clone() }; - let (upd, best_return_type) = py_add_a_thing(cx, &lvalue_path, 'v', potential_new_type, lhs_lvalue); + let potential_new_type = if type_problems(&lvalue_type) > type_problems(&rhs_type) { + rhs_type.clone() + } else { + lvalue_type.clone() + }; + let (upd, best_return_type) = + py_add_a_thing(cx, 
&lvalue_path, 'v', potential_new_type, lhs_lvalue); // let (upd2, best_return_type) = py_add_a_thing(cx, &func_path_str, 'f', format!("!{}", ret_type), node); if upd { let path: Vec = lvalue_path.split("::").map(String::from).collect(); - cx.ap.defs.insert(lvalue_path.clone(), AstDefinition { - official_path: path, - symbol_type: SymbolType::VariableDefinition, - usages: vec![], - resolved_type: best_return_type, - this_is_a_class: "".to_string(), - this_class_derived_from: vec![], - cpath: "".to_string(), - decl_line1: lhs_lvalue.range().start_point.row + 1, - decl_line2: lhs_lvalue.range().end_point.row + 1, - body_line1: 0, - body_line2: 0, - }); + cx.ap.defs.insert( + lvalue_path.clone(), + AstDefinition { + official_path: path, + symbol_type: SymbolType::VariableDefinition, + usages: vec![], + resolved_type: best_return_type, + this_is_a_class: "".to_string(), + this_class_derived_from: vec![], + cpath: "".to_string(), + decl_line1: lhs_lvalue.range().start_point.row + 1, + decl_line2: lhs_lvalue.range().end_point.row + 1, + body_line1: 0, + body_line2: 0, + }, + ); } } -fn py_type_generic<'a>(cx: &mut ContextPy, node: Option>, path: &Vec, level: usize) -> String { +fn py_type_generic<'a>( + cx: &mut ContextPy, + node: Option>, + path: &Vec, + level: usize, +) -> String { if node.is_none() { - return format!("?") + return format!("?"); } // type[generic_type[identifier[List]type_parameter[[type[identifier[Goat]]]]]]] // type[generic_type[identifier[List]type_parameter[[type[generic_type[identifier[Optional]type_parameter[[type[identifier[Goat]]]]]]]] let node = node.unwrap(); match node.kind() { - "none" => { format!("void") }, - "type" => { py_type_generic(cx, node.child(0), path, level+1) }, + "none" => { + format!("void") + } + "type" => py_type_generic(cx, node.child(0), path, level + 1), "identifier" | "attribute" => { if let Some(a_type) = py_resolve_dotted_creating_usages(cx, &node, path, false) { if !a_type.resolved_as.is_empty() { @@ -360,8 +468,10 @@ fn py_type_generic<'a>(cx: &mut ContextPy, node: Option>, path: &Vec { format!("CALLABLE_ARGLIST") }, + } + "list" => { + format!("CALLABLE_ARGLIST") + } "generic_type" => { let mut inside_type = String::new(); let mut todo = ""; @@ -376,8 +486,12 @@ fn py_type_generic<'a>(cx: &mut ContextPy, node: Option>, path: &Vec todo = "Tuple", ("identifier", "Callable") => todo = "Callable", ("identifier", "Optional") => todo = "Optional", - ("identifier", _) | ("attribute", _) => inside_type = format!("ERR/ID/{}", child_text), - ("type_parameter", _) => inside_type = py_type_generic(cx, Some(child), path, level+1), + ("identifier", _) | ("attribute", _) => { + inside_type = format!("ERR/ID/{}", child_text) + } + ("type_parameter", _) => { + inside_type = py_type_generic(cx, Some(child), path, level + 1) + } (_, _) => inside_type = format!("ERR/GENERIC/{:?}", child.kind()), } } @@ -393,7 +507,7 @@ fn py_type_generic<'a>(cx: &mut ContextPy, node: Option>, path: &Vec { let split = type_zerolevel_comma_split(inside_type.as_str()); if split.len() == 2 { @@ -401,8 +515,8 @@ fn py_type_generic<'a>(cx: &mut ContextPy, node: Option>, path: &Vec format!("NOTHING_TODO/{}", inside_type) + } + _ => format!("NOTHING_TODO/{}", inside_type), }; // debug!(cx, "{}=> TODO {}", spaces, result); result @@ -410,42 +524,59 @@ fn py_type_generic<'a>(cx: &mut ContextPy, node: Option>, path: &Vec { // type_parameter[ "[" "type" "," "type" "]" ] let mut comma_sep_types = String::new(); - for i in 0 .. 
node.child_count() { + for i in 0..node.child_count() { let child = node.child(i).unwrap(); - comma_sep_types.push_str(match child.kind() { - "[" | "]" => "".to_string(), - "type" | "identifier" => py_type_generic(cx, Some(child), path, level+1), - "," => ",".to_string(), - _ => format!("SOMETHING/{:?}/{}", child.kind(), cx.ap.code[child.byte_range()].to_string()) - }.as_str()); + comma_sep_types.push_str( + match child.kind() { + "[" | "]" => "".to_string(), + "type" | "identifier" => py_type_generic(cx, Some(child), path, level + 1), + "," => ",".to_string(), + _ => format!( + "SOMETHING/{:?}/{}", + child.kind(), + cx.ap.code[child.byte_range()].to_string() + ), + } + .as_str(), + ); } comma_sep_types } _ => { let msg = cx.ap.error_report(&node, format!("py_type_generic syntax")); debug!(cx, "{}", msg); - format!("UNK/{:?}/{}", node.kind(), cx.ap.code[node.byte_range()].to_string()) + format!( + "UNK/{:?}/{}", + node.kind(), + cx.ap.code[node.byte_range()].to_string() + ) } } } -fn py_string<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) -> String -{ +fn py_string<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) -> String { for i in 0..node.child_count() { let child = node.child(i).unwrap(); // debug!(cx, " string child[{}] {}", i, cx.ap.recursive_print_with_red_brackets(&child)); match child.kind() { "interpolation" => { - let _ = py_type_of_expr_creating_usages(cx, child.child_by_field_name("expression"), path); - }, - _ => { }, + let _ = py_type_of_expr_creating_usages( + cx, + child.child_by_field_name("expression"), + path, + ); + } + _ => {} } } "str".to_string() } -fn py_type_of_expr_creating_usages<'a>(cx: &mut ContextPy, node: Option>, path: &Vec) -> String -{ +fn py_type_of_expr_creating_usages<'a>( + cx: &mut ContextPy, + node: Option>, + path: &Vec, +) -> String { if node.is_none() { return "".to_string(); } @@ -459,85 +590,99 @@ fn py_type_of_expr_creating_usages<'a>(cx: &mut ContextPy, node: Option for i in 0..node.child_count() { let child = node.child(i).unwrap(); match child.kind() { - "(" | "," |")" => { continue; } + "(" | "," | ")" => { + continue; + } _ => {} } elements.push(py_type_of_expr_creating_usages(cx, Some(child), path)); } format!("({})", elements.join(",")) - }, + } "tuple" => { let mut elements = vec![]; for i in 0..node.child_count() { let child = node.child(i).unwrap(); match child.kind() { - "(" | "," |")" => { continue; } + "(" | "," | ")" => { + continue; + } _ => {} } elements.push(py_type_of_expr_creating_usages(cx, Some(child), path)); } format!("({})", elements.join(",")) - }, + } "comparison_operator" => { - for i in 0 .. 
node.child_count() { + for i in 0..node.child_count() { let child = node.child(i).unwrap(); match child.kind() { - "is" | "is not" | ">" | "<" | "<=" | "==" | "!=" | ">=" | "%" => { continue; } + "is" | "is not" | ">" | "<" | "<=" | "==" | "!=" | ">=" | "%" => { + continue; + } _ => {} } py_type_of_expr_creating_usages(cx, Some(child), path); } "bool".to_string() - }, + } "binary_operator" => { - let left_type = py_type_of_expr_creating_usages(cx, node.child_by_field_name("left"), path); - let _right_type = py_type_of_expr_creating_usages(cx, node.child_by_field_name("right"), path); - let _op = cx.ap.code[node.child_by_field_name("operator").unwrap().byte_range()].to_string(); + let left_type = + py_type_of_expr_creating_usages(cx, node.child_by_field_name("left"), path); + let _right_type = + py_type_of_expr_creating_usages(cx, node.child_by_field_name("right"), path); + let _op = + cx.ap.code[node.child_by_field_name("operator").unwrap().byte_range()].to_string(); left_type - }, + } "unary_operator" | "not_operator" => { // ignore "operator" - let arg_type = py_type_of_expr_creating_usages(cx, node.child_by_field_name("argument"), path); + let arg_type = + py_type_of_expr_creating_usages(cx, node.child_by_field_name("argument"), path); arg_type - }, - "integer" => { "int".to_string() }, - "float" => { "float".to_string() }, - "string" => { py_string(cx, &node, path) }, - "false" => { "bool".to_string() }, - "true" => { "bool".to_string() }, - "none" => { "void".to_string() }, + } + "integer" => "int".to_string(), + "float" => "float".to_string(), + "string" => py_string(cx, &node, path), + "false" => "bool".to_string(), + "true" => "bool".to_string(), + "none" => "void".to_string(), "call" => { let fname = node.child_by_field_name("function").unwrap(); let ftype = py_type_of_expr_creating_usages(cx, Some(fname), path); - let arg_types = py_type_of_expr_creating_usages(cx, node.child_by_field_name("arguments"), path); + let arg_types = + py_type_of_expr_creating_usages(cx, node.child_by_field_name("arguments"), path); let ret_type = type_call(ftype.clone(), arg_types.clone()); ret_type - }, + } "identifier" | "dotted_name" | "attribute" => { - let dotted_type = if let Some(u) = py_resolve_dotted_creating_usages(cx, &node, path, false) { - if u.resolved_as.starts_with("!") { // trivial function, like "range" that has type ![int] - u.resolved_as - } else if !u.resolved_as.is_empty() { - if let Some(resolved_thing) = cx.ap.things.get(&u.resolved_as) { - resolved_thing.type_resolved.clone() + let dotted_type = + if let Some(u) = py_resolve_dotted_creating_usages(cx, &node, path, false) { + if u.resolved_as.starts_with("!") { + // trivial function, like "range" that has type ![int] + u.resolved_as + } else if !u.resolved_as.is_empty() { + if let Some(resolved_thing) = cx.ap.things.get(&u.resolved_as) { + resolved_thing.type_resolved.clone() + } else { + format!("?::{}", u.resolved_as) + } } else { - format!("?::{}", u.resolved_as) + // assert!(u.targets_for_guesswork.len() > 0); + // u.targets_for_guesswork[0].clone() + format!("ERR/FUNC_NOT_FOUND/{}", u.targets_for_guesswork[0]) } } else { - // assert!(u.targets_for_guesswork.len() > 0); - // u.targets_for_guesswork[0].clone() - format!("ERR/FUNC_NOT_FOUND/{}", u.targets_for_guesswork[0]) - } - } else { - format!("ERR/DOTTED_NOT_FOUND/{}", node_text) - }; + format!("ERR/DOTTED_NOT_FOUND/{}", node_text) + }; dotted_type - }, + } "subscript" => { - let typeof_value = py_type_of_expr_creating_usages(cx, node.child_by_field_name("value"), path); 
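// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of this patch): the type-string
// conventions the helpers in parse_common.rs encode, per the comments there —
// "[T]" is something indexable yielding T, "(A,B,C)" a tuple, "!T" something
// callable returning T, "ERR/..." an error marker. Assumes the helpers are in
// scope, e.g.:
//   use crate::ast::parse_common::{type_call, type_deindex, type_deindex_n, type_zerolevel_comma_split};
#[test]
fn type_string_conventions_sketch() {
    // subscript / `for x in my_list`: "[MyType]" collapses to "MyType"
    assert_eq!(type_deindex("[MyType]".to_string()), "MyType");
    // calling a value typed "!MyReturnType" yields "MyReturnType"
    assert_eq!(type_call("!MyReturnType".to_string(), String::new()), "MyReturnType");
    // commas split only at zero bracket-nesting depth
    assert_eq!(
        type_zerolevel_comma_split("MyClass1,[int,int],MyClass2"),
        vec!["MyClass1", "[int,int]", "MyClass2"]
    );
    // tuple unpacking `a, b = my_value`: pick the n-th element type
    assert_eq!(
        type_deindex_n("(MyClass1,[int,int],MyClass2)".to_string(), 1),
        "[int,int]"
    );
}
// ---------------------------------------------------------------------------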
+ let typeof_value = + py_type_of_expr_creating_usages(cx, node.child_by_field_name("value"), path); py_type_of_expr_creating_usages(cx, node.child_by_field_name("subscript"), path); type_deindex(typeof_value) - }, + } "list_comprehension" => { let mut path_anon = path.clone(); path_anon.push("".to_string()); @@ -550,8 +695,10 @@ fn py_type_of_expr_creating_usages<'a>(cx: &mut ContextPy, node: Option } else { format!("ERR/EXPR/list_comprehension/no_for") } - }, - "keyword_argument" => { format!("void") }, + } + "keyword_argument" => { + format!("void") + } _ => { let msg = cx.ap.error_report(&node, format!("py_type_of_expr syntax")); debug!(cx, "{}", msg); @@ -563,14 +710,13 @@ fn py_type_of_expr_creating_usages<'a>(cx: &mut ContextPy, node: Option type_of } -fn py_class<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) -{ +fn py_class<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) { let mut derived_from = vec![]; let mut class_name = "".to_string(); let mut body = None; let mut body_line1 = usize::MAX; let mut body_line2 = 0; - for i in 0 .. node.child_count() { + for i in 0..node.child_count() { let child = node.child(i).unwrap(); match child.kind() { "class" | ":" => continue, @@ -580,25 +726,35 @@ fn py_class<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) body_line2 = body_line2.max(child.range().end_point.row + 1); body = Some(child); break; - }, + } "argument_list" => { - for j in 0 .. child.child_count() { + for j in 0..child.child_count() { let arg = child.child(j).unwrap(); match arg.kind() { "identifier" | "attribute" => { - if let Some(a_type) = py_resolve_dotted_creating_usages(cx, &arg, path, false) { + if let Some(a_type) = + py_resolve_dotted_creating_usages(cx, &arg, path, false) + { if !a_type.resolved_as.is_empty() { // XXX losing information, we have resolved usage, turning it into approx 🔎-link - let after_last_colon_colon = a_type.resolved_as.split("::").last().unwrap().to_string(); + let after_last_colon_colon = + a_type.resolved_as.split("::").last().unwrap().to_string(); derived_from.push(format!("py🔎{}", after_last_colon_colon)); } else { // could be better than a guess, too assert!(!a_type.targets_for_guesswork.is_empty()); - let after_last_colon_colon = a_type.targets_for_guesswork.first().unwrap().split("::").last().unwrap().to_string(); + let after_last_colon_colon = a_type + .targets_for_guesswork + .first() + .unwrap() + .split("::") + .last() + .unwrap() + .to_string(); derived_from.push(format!("py🔎{}", after_last_colon_colon)); } } - }, + } "," | "(" | ")" => continue, _ => { let msg = cx.ap.error_report(&arg, format!("py_class dfrom syntax")); @@ -606,7 +762,7 @@ fn py_class<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) } } } - }, + } _ => { let msg = cx.ap.error_report(&child, format!("py_class syntax")); debug!(cx, "{}", msg); @@ -627,32 +783,37 @@ fn py_class<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) let class_path = [path.clone(), vec![class_name.clone()]].concat(); let class_path_str = class_path.join("::"); - cx.ap.defs.insert(class_path_str.clone(), AstDefinition { - official_path: class_path.clone(), - symbol_type: SymbolType::StructDeclaration, - usages: vec![], - resolved_type: format!("!{}", class_path.join("::")), - this_is_a_class: format!("py🔎{}", class_name), - this_class_derived_from: derived_from, - cpath: "".to_string(), - decl_line1: node.range().start_point.row + 1, - decl_line2: (node.range().start_point.row + 1).max(body_line1 - 1), - body_line1, - body_line2, - }); - - 
cx.ap.things.insert(class_path_str.clone(), Thing { - tline: node.range().start_point.row, - public: py_is_public(cx, &class_path_str), - thing_kind: 's', - type_resolved: format!("!{}", class_path_str), // this is about constructor in python, name of the class() is used as constructor, return type is the class - }); + cx.ap.defs.insert( + class_path_str.clone(), + AstDefinition { + official_path: class_path.clone(), + symbol_type: SymbolType::StructDeclaration, + usages: vec![], + resolved_type: format!("!{}", class_path.join("::")), + this_is_a_class: format!("py🔎{}", class_name), + this_class_derived_from: derived_from, + cpath: "".to_string(), + decl_line1: node.range().start_point.row + 1, + decl_line2: (node.range().start_point.row + 1).max(body_line1 - 1), + body_line1, + body_line2, + }, + ); + + cx.ap.things.insert( + class_path_str.clone(), + Thing { + tline: node.range().start_point.row, + public: py_is_public(cx, &class_path_str), + thing_kind: 's', + type_resolved: format!("!{}", class_path_str), // this is about constructor in python, name of the class() is used as constructor, return type is the class + }, + ); py_body(cx, &body.unwrap(), &class_path); // debug!(cx, "\nCLASS {:?}", cx.ap.defs.get(&class_path.join("::")).unwrap()); } - fn py_function<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) { let mut body_line1 = usize::MAX; let mut body_line2 = 0; @@ -660,7 +821,7 @@ fn py_function<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) { let mut params_node = None; let mut body = None; let mut returns = None; - for i in 0 .. node.child_count() { + for i in 0..node.child_count() { let child = node.child(i).unwrap(); match child.kind() { "identifier" => func_name = cx.ap.code[child.byte_range()].to_string(), @@ -669,10 +830,10 @@ fn py_function<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) { body_line2 = body_line2.max(child.range().end_point.row + 1); body = Some(child); break; - }, + } "parameters" => params_node = Some(child), "type" => returns = Some(child), - "def" | "->" | ":" => {}, + "def" | "->" | ":" => {} _ => { let msg = cx.ap.error_report(&child, format!("py_function syntax")); debug!(cx, "{}", msg); @@ -716,98 +877,131 @@ fn py_function<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) { if param_name == "self" { type_resolved = path.join("::"); } - }, + } "typed_parameter" | "typed_default_parameter" | "default_parameter" => { if let Some(param_name_node) = param_node.child(0) { param_name = cx.ap.code[param_name_node.byte_range()].to_string(); } - type_resolved = py_type_generic(cx, param_node.child_by_field_name("type"), &func_path, 0); - let _defvalue_type = py_type_of_expr_creating_usages(cx, param_node.child_by_field_name("value"), &func_path); - }, + type_resolved = + py_type_generic(cx, param_node.child_by_field_name("type"), &func_path, 0); + let _defvalue_type = py_type_of_expr_creating_usages( + cx, + param_node.child_by_field_name("value"), + &func_path, + ); + } "," | "(" | ")" => continue, // "list_splat_pattern" for *args // "dictionary_splat_pattern" for **kwargs _ => { - let msg = cx.ap.error_report(¶m_node, format!("py_function parameter syntax")); + let msg = cx + .ap + .error_report(¶m_node, format!("py_function parameter syntax")); debug!(cx, "{}", msg); continue; } } if param_name.is_empty() { - let msg = cx.ap.error_report(¶m_node, format!("py_function nameless param")); + let msg = cx + .ap + .error_report(¶m_node, format!("py_function nameless param")); debug!(cx, "{}", msg); continue; } let param_path = 
[func_path.clone(), vec![param_name.clone()]].concat(); - cx.ap.things.insert(param_path.join("::"), Thing { - tline: param_node.range().start_point.row, - public: false, - thing_kind: 'p', - type_resolved, - }); + cx.ap.things.insert( + param_path.join("::"), + Thing { + tline: param_node.range().start_point.row, + public: false, + thing_kind: 'p', + type_resolved, + }, + ); } let ret_type = py_body(cx, &body.unwrap(), &func_path); - let (upd2, best_return_type) = py_add_a_thing(cx, &func_path_str, 'f', format!("!{}", ret_type), node); + let (upd2, best_return_type) = + py_add_a_thing(cx, &func_path_str, 'f', format!("!{}", ret_type), node); if upd1 || upd2 { - cx.ap.defs.insert(func_path_str, AstDefinition { - official_path: func_path.clone(), - symbol_type: SymbolType::FunctionDeclaration, - usages: vec![], - resolved_type: best_return_type, - this_is_a_class: "".to_string(), - this_class_derived_from: vec![], - cpath: "".to_string(), - decl_line1: node.range().start_point.row + 1, - decl_line2: (node.range().start_point.row + 1).max(body_line1 - 1), - body_line1, - body_line2, - }); + cx.ap.defs.insert( + func_path_str, + AstDefinition { + official_path: func_path.clone(), + symbol_type: SymbolType::FunctionDeclaration, + usages: vec![], + resolved_type: best_return_type, + this_is_a_class: "".to_string(), + this_class_derived_from: vec![], + cpath: "".to_string(), + decl_line1: node.range().start_point.row + 1, + decl_line2: (node.range().start_point.row + 1).max(body_line1 - 1), + body_line1, + body_line2, + }, + ); } } -fn py_body<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) -> String -{ - let mut ret_type = "void".to_string(); // if there's no return clause, then it's None aka void +fn py_body<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) -> String { + let mut ret_type = "void".to_string(); // if there's no return clause, then it's None aka void debug!(cx, "{}", node.kind()); cx.ap.reclevel += 1; match node.kind() { "import_statement" | "import_from_statement" => py_import(cx, node, path), - "if" | "else" | "elif" => { }, - "module" | "block" | "expression_statement" | "else_clause" | "if_statement" | "elif_clause" => { + "if" | "else" | "elif" => {} + "module" + | "block" + | "expression_statement" + | "else_clause" + | "if_statement" + | "elif_clause" => { for i in 0..node.child_count() { let child = node.child(i).unwrap(); match child.kind() { - "if" | "elif" | "else" | ":" | "integer" | "float" | "string" | "false" | "true" => { continue; } - "return_statement" => { ret_type = py_type_of_expr_creating_usages(cx, child.child(1), path); } - _ => { let _ = py_body(cx, &child, path); } + "if" | "elif" | "else" | ":" | "integer" | "float" | "string" | "false" + | "true" => { + continue; + } + "return_statement" => { + ret_type = py_type_of_expr_creating_usages(cx, child.child(1), path); + } + _ => { + let _ = py_body(cx, &child, path); + } } } - }, - "class_definition" => py_class(cx, node, path), // calls py_body recursively - "function_definition" => py_function(cx, node, path), // calls py_body recursively + } + "class_definition" => py_class(cx, node, path), // calls py_body recursively + "function_definition" => py_function(cx, node, path), // calls py_body recursively "decorated_definition" => { if let Some(definition) = node.child_by_field_name("definition") { match definition.kind() { "class_definition" => py_class(cx, &definition, path), "function_definition" => py_function(cx, &definition, path), _ => { - let msg = cx.ap.error_report(&definition, 
format!("decorated_definition with unknown definition type")); + let msg = cx.ap.error_report( + &definition, + format!("decorated_definition with unknown definition type"), + ); debug!(cx, "{}", msg); } } } - }, + } "assignment" => py_assignment(cx, node, path, false), "for_statement" => { py_assignment(cx, node, path, true); let _body_type = py_body(cx, &node.child_by_field_name("body").unwrap(), path); } "while_statement" => { - let _cond_type = py_type_of_expr_creating_usages(cx, node.child_by_field_name("condition"), path); + let _cond_type = + py_type_of_expr_creating_usages(cx, node.child_by_field_name("condition"), path); let _body_type = py_body(cx, &node.child_by_field_name("body").unwrap(), path); } - "call" | "comparison_operator" => { py_type_of_expr_creating_usages(cx, Some(node.clone()), path); } + "call" | "comparison_operator" => { + py_type_of_expr_creating_usages(cx, Some(node.clone()), path); + } _ => { let msg = cx.ap.error_report(node, format!("py_body syntax error")); debug!(cx, "{}", msg); @@ -818,10 +1012,11 @@ fn py_body<'a>(cx: &mut ContextPy, node: &Node<'a>, path: &Vec) -> Strin return ret_type; } -fn py_make_cx(code: &str) -> ContextPy -{ +fn py_make_cx(code: &str) -> ContextPy { let mut sitter = Parser::new(); - sitter.set_language(&tree_sitter_python::LANGUAGE.into()).unwrap(); + sitter + .set_language(&tree_sitter_python::LANGUAGE.into()) + .unwrap(); let cx = ContextPy { ap: ContextAnyParser { sitter, @@ -839,8 +1034,7 @@ fn py_make_cx(code: &str) -> ContextPy cx } -pub fn py_parse(code: &str) -> ContextPy -{ +pub fn py_parse(code: &str) -> ContextPy { let mut cx = py_make_cx(code); let tree = cx.ap.sitter.parse(code, None).unwrap(); let path = vec!["root".to_string()]; @@ -856,23 +1050,25 @@ pub fn py_parse(code: &str) -> ContextPy cx.ap.errs = AstErrorStats::default(); pass_n += 1; } - cx.ap.defs.insert("root".to_string(), AstDefinition { - official_path: vec!["root".to_string(), "".to_string()], - symbol_type: SymbolType::Module, - usages: vec![], - resolved_type: "".to_string(), - this_is_a_class: "".to_string(), - this_class_derived_from: vec![], - cpath: "".to_string(), - decl_line1: 1, - decl_line2: cx.ap.code.lines().count(), - body_line1: 0, - body_line2: 0, - }); + cx.ap.defs.insert( + "root".to_string(), + AstDefinition { + official_path: vec!["root".to_string(), "".to_string()], + symbol_type: SymbolType::Module, + usages: vec![], + resolved_type: "".to_string(), + this_is_a_class: "".to_string(), + this_class_derived_from: vec![], + cpath: "".to_string(), + decl_line1: 1, + decl_line2: cx.ap.code.lines().count(), + body_line1: 0, + body_line2: 0, + }, + ); return cx; } - // Run tests like this: // cargo test --no-default-features test_parse_py_goat_main -- --nocapture @@ -880,8 +1076,7 @@ pub fn py_parse(code: &str) -> ContextPy mod tests { use super::*; - fn py_parse4test(code: &str) -> String - { + fn py_parse4test(code: &str) -> String { let mut cx = py_parse(code); cx.ap.dump(); let _ = cx.ap.export_defs("test"); @@ -892,34 +1087,51 @@ mod tests { fn test_parse_py_jump_to_conclusions() { let code = include_str!("../../tests/emergency_frog_situation/jump_to_conclusions.py"); let annotated = py_parse4test(code); - std::fs::write("src/ast/alt_testsuite/jump_to_conclusions_annotated.py", annotated).expect("Unable to write file"); + std::fs::write( + "src/ast/alt_testsuite/jump_to_conclusions_annotated.py", + annotated, + ) + .expect("Unable to write file"); } #[test] fn test_parse_py_tort1() { let code = 
include_str!("alt_testsuite/py_torture1_attr.py"); let annotated = py_parse4test(code); - std::fs::write("src/ast/alt_testsuite/py_torture1_attr_annotated.py", annotated).expect("Unable to write file"); + std::fs::write( + "src/ast/alt_testsuite/py_torture1_attr_annotated.py", + annotated, + ) + .expect("Unable to write file"); } #[test] fn test_parse_py_tort2() { let code = include_str!("alt_testsuite/py_torture2_resolving.py"); let annotated = py_parse4test(code); - std::fs::write("src/ast/alt_testsuite/py_torture2_resolving_annotated.py", annotated).expect("Unable to write file"); + std::fs::write( + "src/ast/alt_testsuite/py_torture2_resolving_annotated.py", + annotated, + ) + .expect("Unable to write file"); } #[test] fn test_parse_py_goat_library() { let code = include_str!("alt_testsuite/py_goat_library.py"); let annotated = py_parse4test(code); - std::fs::write("src/ast/alt_testsuite/py_goat_library_annotated.py", annotated).expect("Unable to write file"); + std::fs::write( + "src/ast/alt_testsuite/py_goat_library_annotated.py", + annotated, + ) + .expect("Unable to write file"); } #[test] fn test_parse_py_goat_main() { let code = include_str!("alt_testsuite/py_goat_main.py"); let annotated = py_parse4test(code); - std::fs::write("src/ast/alt_testsuite/py_goat_main_annotated.py", annotated).expect("Unable to write file"); + std::fs::write("src/ast/alt_testsuite/py_goat_main_annotated.py", annotated) + .expect("Unable to write file"); } } diff --git a/refact-agent/engine/src/ast/treesitter/ast_instance_structs.rs b/refact-agent/engine/src/ast/treesitter/ast_instance_structs.rs index e8970c0b1..fb9347c14 100644 --- a/refact-agent/engine/src/ast/treesitter/ast_instance_structs.rs +++ b/refact-agent/engine/src/ast/treesitter/ast_instance_structs.rs @@ -87,7 +87,6 @@ impl TypeDef { } } - #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct AstSymbolFields { pub guid: Uuid, @@ -183,7 +182,8 @@ impl SymbolInformation { } pub fn get_declaration_content(&self, content: &String) -> io::Result { - let content = content.get(self.declaration_range.start_byte..self.declaration_range.end_byte); + let content = + content.get(self.declaration_range.start_byte..self.declaration_range.end_byte); if content.is_none() { return Err(io::Error::other("Incorrect declaration range")); } @@ -238,7 +238,6 @@ impl Default for AstSymbolFields { } } - #[async_trait] #[typetag::serde] #[dyn_partial_eq] @@ -280,7 +279,9 @@ pub trait AstSymbolInstance: Debug + Send + Sync + Any { &self.fields().language } - fn file_path(&self) -> &PathBuf { &self.fields().file_path } + fn file_path(&self) -> &PathBuf { + &self.fields().file_path + } fn is_type(&self) -> bool; @@ -360,9 +361,7 @@ pub trait AstSymbolInstance: Debug + Send + Sync + Any { fn remove_linked_guids(&mut self, guids: &HashSet) { let mut new_guids = vec![]; - for t in self - .types() - .iter() { + for t in self.types().iter() { if guids.contains(&t.guid.unwrap_or_default()) { new_guids.push(None); } else { @@ -389,7 +388,6 @@ pub trait AstSymbolInstance: Debug + Send + Sync + Any { // pub type AstSymbolInstanceRc = Rc>>; pub type AstSymbolInstanceArc = Arc>>; - /* StructDeclaration */ @@ -410,7 +408,6 @@ impl Default for StructDeclaration { } } - #[async_trait] #[typetag::serde] impl AstSymbolInstance for StructDeclaration { @@ -422,7 +419,9 @@ impl AstSymbolInstance for StructDeclaration { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn types(&self) -> Vec 
{ let mut types: Vec = vec![]; @@ -480,15 +479,11 @@ impl AstSymbolInstance for StructDeclaration { fn temporary_types_cleanup(&mut self) { for t in self.inherited_types.iter_mut() { t.inference_info = None; - t.mutate_nested_types(|t| { - t.inference_info = None - }) + t.mutate_nested_types(|t| t.inference_info = None) } for t in self.template_types.iter_mut() { t.inference_info = None; - t.mutate_nested_types(|t| { - t.inference_info = None - }) + t.mutate_nested_types(|t| t.inference_info = None) } } @@ -496,14 +491,15 @@ impl AstSymbolInstance for StructDeclaration { true } - fn is_declaration(&self) -> bool { true } + fn is_declaration(&self) -> bool { + true + } fn symbol_type(&self) -> SymbolType { SymbolType::StructDeclaration } } - /* TypeAlias */ @@ -533,7 +529,9 @@ impl AstSymbolInstance for TypeAlias { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn types(&self) -> Vec { let mut types: Vec = vec![]; @@ -571,9 +569,7 @@ impl AstSymbolInstance for TypeAlias { fn temporary_types_cleanup(&mut self) { for t in self.types.iter_mut() { t.inference_info = None; - t.mutate_nested_types(|t| { - t.inference_info = None - }) + t.mutate_nested_types(|t| t.inference_info = None) } } @@ -581,14 +577,15 @@ impl AstSymbolInstance for TypeAlias { true } - fn is_declaration(&self) -> bool { true } + fn is_declaration(&self) -> bool { + true + } fn symbol_type(&self) -> SymbolType { SymbolType::TypeAlias } } - /* ClassFieldDeclaration */ @@ -618,7 +615,9 @@ impl AstSymbolInstance for ClassFieldDeclaration { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn types(&self) -> Vec { let mut types: Vec = vec![]; @@ -649,16 +648,16 @@ impl AstSymbolInstance for ClassFieldDeclaration { fn temporary_types_cleanup(&mut self) { self.type_.inference_info = None; - self.type_.mutate_nested_types(|t| { - t.inference_info = None - }) + self.type_.mutate_nested_types(|t| t.inference_info = None) } fn is_type(&self) -> bool { false } - fn is_declaration(&self) -> bool { true } + fn is_declaration(&self) -> bool { + true + } fn symbol_type(&self) -> SymbolType { SymbolType::ClassFieldDeclaration @@ -708,7 +707,9 @@ impl AstSymbolInstance for ImportDeclaration { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn types(&self) -> Vec { vec![] @@ -724,14 +725,15 @@ impl AstSymbolInstance for ImportDeclaration { false } - fn is_declaration(&self) -> bool { false } + fn is_declaration(&self) -> bool { + false + } fn symbol_type(&self) -> SymbolType { SymbolType::ImportDeclaration } } - /* VariableDefinition */ @@ -761,7 +763,9 @@ impl AstSymbolInstance for VariableDefinition { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn types(&self) -> Vec { let mut types: Vec = vec![]; @@ -792,32 +796,22 @@ impl AstSymbolInstance for VariableDefinition { fn temporary_types_cleanup(&mut self) { self.type_.inference_info = None; - self.type_.mutate_nested_types(|t| { - t.inference_info = None - }) + self.type_.mutate_nested_types(|t| t.inference_info = None) } fn is_type(&self) -> bool { false } - fn is_declaration(&self) -> bool { true } + fn is_declaration(&self) -> bool { + true + } fn symbol_type(&self) -> SymbolType { SymbolType::VariableDefinition } } - -/* -FunctionDeclaration -*/ 
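// ---------------------------------------------------------------------------
// Illustrative sketch (editor's aside, not part of this patch): the symbol
// kinds in this file share the same cleanup shape — drop the cached inference
// string on a TypeDef and on each nested type via `mutate_nested_types`. The
// free-function name below is hypothetical; the field and method are the ones
// used throughout this file.
fn cleanup_inference_info(t: &mut TypeDef) {
    t.inference_info = None;
    t.mutate_nested_types(|nested| nested.inference_info = None);
}
// ---------------------------------------------------------------------------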
-#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] -pub struct FunctionCaller { - pub inference_info: String, - pub guid: Option, -} - #[derive(Eq, Hash, PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct FunctionArg { pub name: String, @@ -863,7 +857,9 @@ impl AstSymbolInstance for FunctionDeclaration { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn is_type(&self) -> bool { false @@ -931,28 +927,25 @@ impl AstSymbolInstance for FunctionDeclaration { fn temporary_types_cleanup(&mut self) { if let Some(t) = &mut self.return_type { t.inference_info = None; - t.mutate_nested_types(|t| { - t.inference_info = None - }); + t.mutate_nested_types(|t| t.inference_info = None); } for t in self.args.iter_mut() { if let Some(t) = &mut t.type_ { t.inference_info = None; - t.mutate_nested_types(|t| { - t.inference_info = None - }); + t.mutate_nested_types(|t| t.inference_info = None); } } } - fn is_declaration(&self) -> bool { true } + fn is_declaration(&self) -> bool { + true + } fn symbol_type(&self) -> SymbolType { SymbolType::FunctionDeclaration } } - /* CommentDefinition */ @@ -980,7 +973,9 @@ impl AstSymbolInstance for CommentDefinition { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn is_type(&self) -> bool { false @@ -996,14 +991,15 @@ impl AstSymbolInstance for CommentDefinition { fn temporary_types_cleanup(&mut self) {} - fn is_declaration(&self) -> bool { true } + fn is_declaration(&self) -> bool { + true + } fn symbol_type(&self) -> SymbolType { SymbolType::CommentDefinition } } - /* FunctionCall */ @@ -1033,7 +1029,9 @@ impl AstSymbolInstance for FunctionCall { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn is_type(&self) -> bool { false @@ -1095,26 +1093,23 @@ impl AstSymbolInstance for FunctionCall { fn temporary_types_cleanup(&mut self) { if let Some(t) = &mut self.ast_fields.linked_decl_type { t.inference_info = None; - t.mutate_nested_types(|t| { - t.inference_info = None - }); + t.mutate_nested_types(|t| t.inference_info = None); } for t in self.template_types.iter_mut() { t.inference_info = None; - t.mutate_nested_types(|t| { - t.inference_info = None - }); + t.mutate_nested_types(|t| t.inference_info = None); } } - fn is_declaration(&self) -> bool { false } + fn is_declaration(&self) -> bool { + false + } fn symbol_type(&self) -> SymbolType { SymbolType::FunctionCall } } - /* VariableUsage */ @@ -1142,7 +1137,9 @@ impl AstSymbolInstance for VariableUsage { &mut self.ast_fields } - fn as_any_mut(&mut self) -> &mut dyn Any { self } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } fn is_type(&self) -> bool { false @@ -1184,13 +1181,13 @@ impl AstSymbolInstance for VariableUsage { fn temporary_types_cleanup(&mut self) { if let Some(t) = &mut self.ast_fields.linked_decl_type { t.inference_info = None; - t.mutate_nested_types(|t| { - t.inference_info = None - }); + t.mutate_nested_types(|t| t.inference_info = None); } } - fn is_declaration(&self) -> bool { false } + fn is_declaration(&self) -> bool { + false + } fn symbol_type(&self) -> SymbolType { SymbolType::VariableUsage diff --git a/refact-agent/engine/src/ast/treesitter/mod.rs b/refact-agent/engine/src/ast/treesitter/mod.rs index 042afe792..6eb498d76 100644 --- a/refact-agent/engine/src/ast/treesitter/mod.rs +++ 
b/refact-agent/engine/src/ast/treesitter/mod.rs @@ -1,6 +1,6 @@ +pub mod ast_instance_structs; +pub mod file_ast_markup; pub mod language_id; pub mod parsers; -pub mod structs; -pub mod ast_instance_structs; pub mod skeletonizer; -pub mod file_ast_markup; +pub mod structs; diff --git a/refact-agent/engine/src/ast/treesitter/parsers.rs b/refact-agent/engine/src/ast/treesitter/parsers.rs index 8804a88eb..76be471e7 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers.rs @@ -6,18 +6,16 @@ use tracing::error; use crate::ast::treesitter::ast_instance_structs::AstSymbolInstanceArc; use crate::ast::treesitter::language_id::LanguageId; - +mod cpp; +mod java; +mod js; +mod kotlin; pub(crate) mod python; pub(crate) mod rust; #[cfg(test)] mod tests; -mod utils; -mod java; -mod kotlin; -mod cpp; mod ts; -mod js; - +mod utils; #[derive(Debug, PartialEq, Eq)] pub struct ParserError { @@ -36,7 +34,9 @@ fn internal_error(err: E) -> ParserError { } } -pub(crate) fn get_ast_parser(language_id: LanguageId) -> Result, ParserError> { +pub(crate) fn get_ast_parser( + language_id: LanguageId, +) -> Result, ParserError> { match language_id { LanguageId::Rust => { let parser = rust::RustParser::new()?; @@ -71,26 +71,37 @@ pub(crate) fn get_ast_parser(language_id: LanguageId) -> Result Err(ParserError { - message: "Unsupported language id: ".to_string() + &other.to_string() + message: "Unsupported language id: ".to_string() + &other.to_string(), }), } } - -pub fn get_ast_parser_by_filename(filename: &PathBuf) -> Result<(Box, LanguageId), ParserError> { - let suffix = filename.extension().and_then(|e| e.to_str()).unwrap_or("").to_lowercase(); +pub fn get_ast_parser_by_filename( + filename: &PathBuf, +) -> Result<(Box, LanguageId), ParserError> { + let suffix = filename + .extension() + .and_then(|e| e.to_str()) + .unwrap_or("") + .to_lowercase(); let maybe_language_id = get_language_id_by_filename(filename); match maybe_language_id { Some(language_id) => { let parser = get_ast_parser(language_id)?; Ok((parser, language_id)) } - None => Err(ParserError { message: format!("not supported {}", suffix) }), + None => Err(ParserError { + message: format!("not supported {}", suffix), + }), } } pub fn get_language_id_by_filename(filename: &PathBuf) -> Option { - let suffix = filename.extension().and_then(|e| e.to_str()).unwrap_or("").to_lowercase(); + let suffix = filename + .extension() + .and_then(|e| e.to_str()) + .unwrap_or("") + .to_lowercase(); match suffix.as_str() { "cpp" | "cc" | "cxx" | "c++" | "c" | "h" | "hpp" | "hxx" | "hh" => Some(LanguageId::Cpp), "inl" | "inc" | "tpp" | "tpl" => Some(LanguageId::Cpp), @@ -101,7 +112,6 @@ pub fn get_language_id_by_filename(filename: &PathBuf) -> Option { "rs" => Some(LanguageId::Rust), "ts" => Some(LanguageId::TypeScript), "tsx" => Some(LanguageId::TypeScriptReact), - _ => None + _ => None, } } - diff --git a/refact-agent/engine/src/ast/treesitter/parsers/cpp.rs b/refact-agent/engine/src/ast/treesitter/parsers/cpp.rs index 848bc1458..d562af738 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/cpp.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/cpp.rs @@ -9,7 +9,11 @@ use similar::DiffableStr; use tree_sitter::{Node, Parser, Range}; use uuid::Uuid; -use crate::ast::treesitter::ast_instance_structs::{AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, 
VariableDefinition, VariableUsage}; +use crate::ast::treesitter::ast_instance_structs::{ + AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, + FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, + VariableDefinition, VariableUsage, +}; use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::{AstLanguageParser, internal_error, ParserError}; use crate::ast::treesitter::parsers::utils::{CandidateInfo, get_guid}; @@ -18,35 +22,183 @@ pub(crate) struct CppParser { pub parser: Parser, } - static CPP_KEYWORDS: [&str; 92] = [ - "alignas", "alignof", "and", "and_eq", "asm", "auto", "bitand", "bitor", - "bool", "break", "case", "catch", "char", "char8_t", "char16_t", "char32_t", - "class", "compl", "concept", "const", "consteval", "constexpr", "constinit", - "const_cast", "continue", "co_await", "co_return", "co_yield", "decltype", "default", - "delete", "do", "double", "dynamic_cast", "else", "enum", "explicit", "export", "extern", - "false", "float", "for", "friend", "goto", "if", "inline", "int", "long", "mutable", - "namespace", "new", "noexcept", "not", "not_eq", "nullptr", "operator", "or", "or_eq", - "private", "protected", "public", "register", "reinterpret_cast", "requires", "return", - "short", "signed", "sizeof", "static", "static_assert", "static_cast", "struct", "switch", - "template", "this", "thread_local", "throw", "true", "try", "typedef", "typeid", "typename", - "union", "unsigned", "using", "virtual", "void", "volatile", "wchar_t", "while", "xor", "xor_eq" + "alignas", + "alignof", + "and", + "and_eq", + "asm", + "auto", + "bitand", + "bitor", + "bool", + "break", + "case", + "catch", + "char", + "char8_t", + "char16_t", + "char32_t", + "class", + "compl", + "concept", + "const", + "consteval", + "constexpr", + "constinit", + "const_cast", + "continue", + "co_await", + "co_return", + "co_yield", + "decltype", + "default", + "delete", + "do", + "double", + "dynamic_cast", + "else", + "enum", + "explicit", + "export", + "extern", + "false", + "float", + "for", + "friend", + "goto", + "if", + "inline", + "int", + "long", + "mutable", + "namespace", + "new", + "noexcept", + "not", + "not_eq", + "nullptr", + "operator", + "or", + "or_eq", + "private", + "protected", + "public", + "register", + "reinterpret_cast", + "requires", + "return", + "short", + "signed", + "sizeof", + "static", + "static_assert", + "static_cast", + "struct", + "switch", + "template", + "this", + "thread_local", + "throw", + "true", + "try", + "typedef", + "typeid", + "typename", + "union", + "unsigned", + "using", + "virtual", + "void", + "volatile", + "wchar_t", + "while", + "xor", + "xor_eq", ]; static SYSTEM_HEADERS: [&str; 79] = [ - "algorithm", "bitset", "cassert", "cctype", "cerrno", "cfenv", "cfloat", "chrono", "cinttypes", - "climits", "clocale", "cmath", "codecvt", "complex", "condition_variable", "csetjmp", - "csignal", "cstdarg", "cstdbool", "cstddef", "cstdint", "cstdio", "cstdlib", "cstring", "ctgmath", - "ctime", "cuchar", "cwchar", "cwctype", "deque", "exception", "filesystem", "forward_list", "fstream", - "functional", "future", "initializer_list", "iomanip", "ios", "iosfwd", "iostream", "istream", - "iterator", "limits", "list", "locale", "map", "memory", "mutex", "new", "numeric", "optional", - "ostream", "queue", "random", "ratio", "regex", "scoped_allocator", "set", "shared_mutex", - "sstream", "stack", "stdexcept", "streambuf", "string", "string_view", "system_error", 
"thread", - "tuple", "type_traits", "unordered_map", "unordered_set", "utility", "valarray", "variant", "vector", - "version", "wchar.h", "wctype.h", + "algorithm", + "bitset", + "cassert", + "cctype", + "cerrno", + "cfenv", + "cfloat", + "chrono", + "cinttypes", + "climits", + "clocale", + "cmath", + "codecvt", + "complex", + "condition_variable", + "csetjmp", + "csignal", + "cstdarg", + "cstdbool", + "cstddef", + "cstdint", + "cstdio", + "cstdlib", + "cstring", + "ctgmath", + "ctime", + "cuchar", + "cwchar", + "cwctype", + "deque", + "exception", + "filesystem", + "forward_list", + "fstream", + "functional", + "future", + "initializer_list", + "iomanip", + "ios", + "iosfwd", + "iostream", + "istream", + "iterator", + "limits", + "list", + "locale", + "map", + "memory", + "mutex", + "new", + "numeric", + "optional", + "ostream", + "queue", + "random", + "ratio", + "regex", + "scoped_allocator", + "set", + "shared_mutex", + "sstream", + "stack", + "stdexcept", + "streambuf", + "string", + "string_view", + "system_error", + "thread", + "tuple", + "type_traits", + "unordered_map", + "unordered_set", + "utility", + "valarray", + "variant", + "vector", + "version", + "wchar.h", + "wctype.h", ]; - pub fn parse_type(parent: &Node, code: &str) -> Option { let kind = parent.kind(); let text = code.slice(parent.byte_range()).to_string(); @@ -108,8 +260,8 @@ impl CppParser { &mut self, info: &CandidateInfo<'a>, code: &str, - candidates: &mut VecDeque>) - -> Vec { + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = StructDeclaration::default(); @@ -122,13 +274,22 @@ impl CppParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); let mut template_parent_node = info.node.parent(); while let Some(parent) = template_parent_node { match parent.kind() { - "enum_specifier" | "class_specifier" | "struct_specifier" | - "template_declaration" | "namespace_definition" | "function_definition" => { + "enum_specifier" + | "class_specifier" + | "struct_specifier" + | "template_declaration" + | "namespace_definition" + | "function_definition" => { break; } &_ => {} @@ -142,21 +303,29 @@ impl CppParser { start_byte: decl.ast_fields.full_range.start_byte, end_byte: name.end_byte(), start_point: decl.ast_fields.full_range.start_point, - end_point: name.end_position() + end_point: name.end_position(), }; } else { decl.ast_fields.name = format!("anon-{}", decl.ast_fields.guid); } if let Some(template_parent) = template_parent_node { - symbols.extend(self.find_error_usages(&template_parent, code, &info.ast_fields.file_path, - &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &template_parent, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if template_parent.kind() == "template_declaration" { if let Some(parameters) = template_parent.child_by_field_name("parameters") { for i in 0..parameters.child_count() { let child = parameters.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, - &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(arg) = parse_type(&child, code) { decl.template_types.push(arg); } @@ 
-167,13 +336,21 @@ impl CppParser { // find base classes for i in 0..info.node.child_count() { let base_class_clause = info.node.child(i).unwrap(); - symbols.extend(self.find_error_usages(&base_class_clause, code, &info.ast_fields.file_path, - &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &base_class_clause, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if base_class_clause.kind() == "base_class_clause" { for i in 0..base_class_clause.child_count() { let child = base_class_clause.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, - &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(base_class) = parse_type(&child, code) { decl.inherited_types.push(base_class); } @@ -182,7 +359,7 @@ impl CppParser { start_byte: decl.ast_fields.full_range.start_byte, end_byte: base_class_clause.end_byte(), start_point: decl.ast_fields.full_range.start_point, - end_point: base_class_clause.end_position() + end_point: base_class_clause.end_position(), }; } } @@ -199,11 +376,18 @@ impl CppParser { symbols } - fn parse_variable_definition<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_variable_definition<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut type_ = TypeDef::default(); if let Some(type_node) = info.node.child_by_field_name("type") { - if vec!["class_specifier", "struct_specifier", "enum_specifier"].contains(&type_node.kind()) { + if vec!["class_specifier", "struct_specifier", "enum_specifier"] + .contains(&type_node.kind()) + { let usages = self.parse_struct_declaration(info, code, candidates); type_.guid = Some(*usages.last().unwrap().read().guid()); type_.name = Some(usages.last().unwrap().read().name().to_string()); @@ -215,15 +399,29 @@ impl CppParser { } } - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); let mut cursor = info.node.walk(); for child in info.node.children_by_field_name("declarator", &mut cursor) { - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, - &info.parent_guid)); - let (symbols_l, _, name_l, namespace_l) = - self.parse_declaration(&child, code, &info.ast_fields.file_path, - &info.parent_guid, info.ast_fields.is_error, candidates); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); + let (symbols_l, _, name_l, namespace_l) = self.parse_declaration( + &child, + code, + &info.ast_fields.file_path, + &info.parent_guid, + info.ast_fields.is_error, + candidates, + ); symbols.extend(symbols_l); let mut decl = VariableDefinition::default(); @@ -242,7 +440,12 @@ impl CppParser { symbols } - fn parse_field_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_field_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut dtype = TypeDef::default(); if let Some(type_node) = info.node.child_by_field_name("type") { @@ -253,9 +456,15 @@ impl CppParser { // symbols.extend(self.find_error_usages(&parent, code, path, 
parent_guid)); let mut cursor = info.node.walk(); - let declarators = info.node.children_by_field_name("declarator", &mut cursor).collect::>(); + let declarators = info + .node + .children_by_field_name("declarator", &mut cursor) + .collect::>(); cursor = info.node.walk(); - let default_values = info.node.children_by_field_name("default_value", &mut cursor).collect::>(); + let default_values = info + .node + .children_by_field_name("default_value", &mut cursor) + .collect::>(); let match_declarators_to_default_value = || { let mut result: Vec<(Node, Option)> = vec![]; @@ -270,7 +479,9 @@ impl CppParser { let default_value_range = default_value.range(); if let Some(next) = next_mb { let next_range = next.range(); - if default_value_range.start_byte > current_range.end_byte && default_value_range.end_byte < next_range.start_byte { + if default_value_range.start_byte > current_range.end_byte + && default_value_range.end_byte < next_range.start_byte + { default_value_candidate = Some(default_value.clone()); break; } @@ -286,11 +497,15 @@ impl CppParser { result }; - for (declarator, default_value_mb) in match_declarators_to_default_value() { - let (symbols_l, _, name_l, _) = - self.parse_declaration(&declarator, code, &info.ast_fields.file_path, - &info.parent_guid, info.ast_fields.is_error, candidates); + let (symbols_l, _, name_l, _) = self.parse_declaration( + &declarator, + code, + &info.ast_fields.file_path, + &info.parent_guid, + info.ast_fields.is_error, + candidates, + ); if name_l.is_empty() { continue; } @@ -314,7 +529,8 @@ impl CppParser { parent_guid: info.parent_guid.clone(), }); - decl.type_.inference_info = Some(code.slice(default_value.byte_range()).to_string()); + decl.type_.inference_info = + Some(code.slice(default_value.byte_range()).to_string()); } decl.type_ = local_dtype; symbols.push(Arc::new(RwLock::new(Box::new(decl)))); @@ -322,7 +538,12 @@ impl CppParser { symbols } - fn parse_enum_field_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_enum_field_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut decl = ClassFieldDeclaration::default(); decl.ast_fields.language = info.ast_fields.language; @@ -332,7 +553,12 @@ impl CppParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &decl.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &decl.ast_fields.file_path, + &info.parent_guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); @@ -349,21 +575,22 @@ impl CppParser { symbols } - fn parse_declaration<'a>(&mut self, - parent: &Node<'a>, - code: &str, - path: &PathBuf, - parent_guid: &Uuid, - is_error: bool, - candidates: &mut VecDeque>) - -> (Vec, Vec, String, String) { + fn parse_declaration<'a>( + &mut self, + parent: &Node<'a>, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + candidates: &mut VecDeque>, + ) -> (Vec, Vec, String, String) { let mut symbols: Vec = Default::default(); let mut types: Vec = Default::default(); let mut name: String = String::new(); let mut namespace: String = String::new(); #[cfg(test)] #[allow(unused)] - let text = code.slice(parent.byte_range()); + let text = code.slice(parent.byte_range()); let kind = 
parent.kind(); match kind { "identifier" | "field_identifier" => { @@ -375,13 +602,18 @@ impl CppParser { symbols.extend(self.find_error_usages(&name_node, code, path, &parent_guid)); } if let Some(arguments_node) = parent.child_by_field_name("arguments") { - symbols.extend(self.find_error_usages(&arguments_node, code, path, &parent_guid)); + symbols.extend(self.find_error_usages( + &arguments_node, + code, + path, + &parent_guid, + )); self.find_error_usages(&arguments_node, code, path, &parent_guid); for i in 0..arguments_node.child_count() { let child = arguments_node.child(i).unwrap(); #[cfg(test)] #[allow(unused)] - let text = code.slice(child.byte_range()); + let text = code.slice(child.byte_range()); symbols.extend(self.find_error_usages(&child, code, path, &parent_guid)); self.find_error_usages(&child, code, path, &parent_guid); if let Some(dtype) = parse_type(&child, code) { @@ -392,14 +624,24 @@ impl CppParser { } "init_declarator" => { if let Some(declarator) = parent.child_by_field_name("declarator") { - let (symbols_l, _, name_l, _) = - self.parse_declaration(&declarator, code, path, parent_guid, is_error, candidates); + let (symbols_l, _, name_l, _) = self.parse_declaration( + &declarator, + code, + path, + parent_guid, + is_error, + candidates, + ); symbols.extend(symbols_l); name = name_l; } if let Some(value) = parent.child_by_field_name("value") { candidates.push_back(CandidateInfo { - ast_fields: AstSymbolFields::from_data(LanguageId::Cpp, path.clone(), is_error), + ast_fields: AstSymbolFields::from_data( + LanguageId::Cpp, + path.clone(), + is_error, + ), node: value, parent_guid: parent_guid.clone(), }); @@ -409,26 +651,50 @@ impl CppParser { "qualified_identifier" => { if let Some(scope) = parent.child_by_field_name("scope") { symbols.extend(self.find_error_usages(&scope, code, path, &parent_guid)); - let (symbols_l, types_l, name_l, namespace_l) = - self.parse_declaration(&scope, code, path, parent_guid, is_error, candidates); + let (symbols_l, types_l, name_l, namespace_l) = self.parse_declaration( + &scope, + code, + path, + parent_guid, + is_error, + candidates, + ); symbols.extend(symbols_l); types.extend(types_l); - namespace = vec![namespace, name_l, namespace_l].iter().filter(|x| !x.is_empty()).join("::"); + namespace = vec![namespace, name_l, namespace_l] + .iter() + .filter(|x| !x.is_empty()) + .join("::"); } if let Some(name_node) = parent.child_by_field_name("name") { symbols.extend(self.find_error_usages(&name_node, code, path, &parent_guid)); - let (symbols_l, types_l, name_l, namespace_l) = - self.parse_declaration(&name_node, code, path, parent_guid, is_error, candidates); + let (symbols_l, types_l, name_l, namespace_l) = self.parse_declaration( + &name_node, + code, + path, + parent_guid, + is_error, + candidates, + ); symbols.extend(symbols_l); types.extend(types_l); name = name_l; - namespace = vec![namespace, namespace_l].iter().filter(|x| !x.is_empty()).join("::"); + namespace = vec![namespace, namespace_l] + .iter() + .filter(|x| !x.is_empty()) + .join("::"); } } "pointer_declarator" => { if let Some(declarator) = parent.child_by_field_name("declarator") { - let (symbols_l, _, name_l, _) = - self.parse_declaration(&declarator, code, path, parent_guid, is_error, candidates); + let (symbols_l, _, name_l, _) = self.parse_declaration( + &declarator, + code, + path, + parent_guid, + is_error, + candidates, + ); symbols.extend(symbols_l); name = name_l; } @@ -437,8 +703,14 @@ impl CppParser { for i in 0..parent.child_count() { let child = 
parent.child(i).unwrap(); symbols.extend(self.find_error_usages(&child, code, path, &parent_guid)); - let (symbols_l, _, name_l, _) = - self.parse_declaration(&child, code, path, parent_guid, is_error, candidates); + let (symbols_l, _, name_l, _) = self.parse_declaration( + &child, + code, + path, + parent_guid, + is_error, + candidates, + ); symbols.extend(symbols_l); if !name_l.is_empty() { name = name_l; @@ -452,8 +724,14 @@ impl CppParser { } } if let Some(declarator) = parent.child_by_field_name("declarator") { - let (symbols_l, _, name_l, _) = - self.parse_declaration(&declarator, code, path, parent_guid, is_error, candidates); + let (symbols_l, _, name_l, _) = self.parse_declaration( + &declarator, + code, + path, + parent_guid, + is_error, + candidates, + ); symbols.extend(symbols_l); name = name_l; } @@ -464,7 +742,12 @@ impl CppParser { (symbols, types, name, namespace) } - pub fn parse_function_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + pub fn parse_function_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionDeclaration::default(); decl.ast_fields.language = info.ast_fields.language; @@ -476,13 +759,22 @@ impl CppParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); let mut template_parent_node = info.node.parent(); while let Some(parent) = template_parent_node { match parent.kind() { - "enum_specifier" | "class_specifier" | "struct_specifier" | - "template_declaration" | "namespace_definition" | "function_definition" => { + "enum_specifier" + | "class_specifier" + | "struct_specifier" + | "template_declaration" + | "namespace_definition" + | "function_definition" => { break; } &_ => {} @@ -494,38 +786,69 @@ impl CppParser { if let Some(parameters) = template_parent.child_by_field_name("parameters") { for i in 0..parameters.child_count() { let child = parameters.child(i).unwrap(); - let (_, types_l, _, _) = - self.parse_declaration(&child, code, &decl.ast_fields.file_path, - &decl.ast_fields.guid, decl.ast_fields.is_error, - candidates); + let (_, types_l, _, _) = self.parse_declaration( + &child, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + decl.ast_fields.is_error, + candidates, + ); decl.template_types.extend(types_l); - symbols.extend(self.find_error_usages(&child, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); } } } } if let Some(declarator) = info.node.child_by_field_name("declarator") { - symbols.extend(self.find_error_usages(&declarator, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &declarator, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(declarator) = declarator.child_by_field_name("declarator") { - symbols.extend(self.find_error_usages(&declarator, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); - let (symbols_l, types_l, name_l, namespace_l) = - self.parse_declaration(&declarator, code, &decl.ast_fields.file_path, - &decl.ast_fields.guid, 
decl.ast_fields.is_error, - candidates); + symbols.extend(self.find_error_usages( + &declarator, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); + let (symbols_l, types_l, name_l, namespace_l) = self.parse_declaration( + &declarator, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + decl.ast_fields.is_error, + candidates, + ); symbols.extend(symbols_l); decl.ast_fields.name = name_l; decl.ast_fields.namespace = namespace_l; decl.template_types = types_l; } if let Some(parameters) = declarator.child_by_field_name("parameters") { - symbols.extend(self.find_error_usages(¶meters, code, &decl.ast_fields.file_path, - &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + ¶meters, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); for i in 0..parameters.child_count() { let child = parameters.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &decl.ast_fields.file_path, - &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); match child.kind() { "parameter_declaration" => { let mut arg = FunctionArg::default(); @@ -533,10 +856,14 @@ impl CppParser { arg.type_ = parse_type(&type_, code); } if let Some(declarator) = child.child_by_field_name("declarator") { - let (symbols_l, _, name_l, _) = - self.parse_declaration(&declarator, code, &decl.ast_fields.file_path, - &decl.ast_fields.guid, decl.ast_fields.is_error, - candidates); + let (symbols_l, _, name_l, _) = self.parse_declaration( + &declarator, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + decl.ast_fields.is_error, + candidates, + ); symbols.extend(symbols_l); arg.name = name_l; } @@ -545,7 +872,6 @@ impl CppParser { &_ => {} } } - } } @@ -581,7 +907,12 @@ impl CppParser { symbols } - pub fn parse_call_expression<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + pub fn parse_call_expression<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionCall::default(); decl.ast_fields.language = info.ast_fields.language; @@ -595,17 +926,26 @@ impl CppParser { } decl.ast_fields.caller_guid = Some(get_guid()); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(function) = info.node.child_by_field_name("function") { - symbols.extend(self.find_error_usages(&function, code, &info.ast_fields.file_path, - &info.parent_guid)); + symbols.extend(self.find_error_usages( + &function, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); match function.kind() { "identifier" => { decl.ast_fields.name = code.slice(function.byte_range()).to_string(); } "field_expression" => { - if let Some(field) = function.child_by_field_name("field") { + if let Some(field) = function.child_by_field_name("field") { decl.ast_fields.name = code.slice(field.byte_range()).to_string(); } if let Some(argument) = function.child_by_field_name("argument") { @@ -626,8 +966,12 @@ impl CppParser { } } if let Some(arguments) = info.node.child_by_field_name("arguments") { - symbols.extend(self.find_error_usages(&arguments, code, &info.ast_fields.file_path, - &info.parent_guid)); + symbols.extend(self.find_error_usages( + &arguments, + code, + 
&info.ast_fields.file_path, + &info.parent_guid, + )); let mut new_ast_fields = info.ast_fields.clone(); new_ast_fields.caller_guid = None; @@ -644,7 +988,13 @@ impl CppParser { symbols } - fn find_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn find_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); for i in 0..parent.child_count() { let child = parent.child(i).unwrap(); @@ -655,7 +1005,13 @@ impl CppParser { symbols } - fn parse_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn parse_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); match parent.kind() { "identifier" | "field_identifier" => { @@ -703,13 +1059,18 @@ impl CppParser { symbols } - fn parse_usages_<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_usages_<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let kind = info.node.kind(); #[cfg(test)] #[allow(unused)] - let text = code.slice(info.node.byte_range()); + let text = code.slice(info.node.byte_range()); match kind { "enum_specifier" | "class_specifier" | "struct_specifier" => { symbols.extend(self.parse_struct_declaration(info, code, candidates)); @@ -764,7 +1125,12 @@ impl CppParser { node: argument, parent_guid: info.parent_guid.clone(), }); - symbols.extend(self.find_error_usages(&argument, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &argument, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); } symbols.push(Arc::new(RwLock::new(Box::new(usage)))); } @@ -801,12 +1167,11 @@ impl CppParser { match path.kind() { "system_lib_string" | "string_literal" => { let mut name = code.slice(path.byte_range()).to_string(); - name = name.slice(1..name.len()-1).to_string(); + name = name.slice(1..name.len() - 1).to_string(); def.path_components = name.split("/").map(|x| x.to_string()).collect(); if SYSTEM_HEADERS.contains(&&name.as_str()) { def.import_type = ImportType::System; } - } &_ => {} } @@ -867,8 +1232,10 @@ impl CppParser { let symbols_l = self.parse_usages_(&candidate, code, &mut candidates); symbols.extend(symbols_l); } - let guid_to_symbol_map = symbols.iter() - .map(|s| (s.clone().read().guid().clone(), s.clone())).collect::>(); + let guid_to_symbol_map = symbols + .iter() + .map(|s| (s.clone().read().guid().clone(), s.clone())) + .collect::>(); for symbol in symbols.iter_mut() { let guid = symbol.read().guid().clone(); if let Some(parent_guid) = symbol.read().parent_guid() { @@ -881,10 +1248,20 @@ impl CppParser { #[cfg(test)] for symbol in symbols.iter_mut() { let mut sym = symbol.write(); - sym.fields_mut().childs_guid = sym.fields_mut().childs_guid.iter() + sym.fields_mut().childs_guid = sym + .fields_mut() + .childs_guid + .iter() .sorted_by_key(|x| { - guid_to_symbol_map.get(*x).unwrap().read().full_range().start_byte - }).map(|x| x.clone()).collect(); + guid_to_symbol_map + .get(*x) + .unwrap() + .read() + .full_range() + .start_byte + }) + .map(|x| x.clone()) + .collect(); } symbols @@ -898,5 +1275,3 @@ impl AstLanguageParser for CppParser { symbols } } - - diff --git a/refact-agent/engine/src/ast/treesitter/parsers/java.rs 
b/refact-agent/engine/src/ast/treesitter/parsers/java.rs index 42637fa7e..9859781e5 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/java.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/java.rs @@ -11,7 +11,11 @@ use similar::DiffableStr; use tree_sitter::{Node, Parser, Range}; use uuid::Uuid; -use crate::ast::treesitter::ast_instance_structs::{AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, VariableDefinition, VariableUsage}; +use crate::ast::treesitter::ast_instance_structs::{ + AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, + FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, + VariableDefinition, VariableUsage, +}; use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::{AstLanguageParser, internal_error, ParserError}; use crate::ast::treesitter::parsers::utils::{CandidateInfo, get_guid}; @@ -21,16 +25,59 @@ pub(crate) struct JavaParser { } static JAVA_KEYWORDS: [&str; 50] = [ - "abstract", "assert", "boolean", "break", "byte", "case", "catch", "char", "class", "const", - "continue", "default", "do", "double", "else", "enum", "extends", "final", "finally", "float", - "for", "if", "goto", "implements", "import", "instanceof", "int", "interface", "long", "native", - "new", "package", "private", "protected", "public", "return", "short", "static", "strictfp", "super", - "switch", "synchronized", "this", "throw", "throws", "transient", "try", "void", "volatile", "while" + "abstract", + "assert", + "boolean", + "break", + "byte", + "case", + "catch", + "char", + "class", + "const", + "continue", + "default", + "do", + "double", + "else", + "enum", + "extends", + "final", + "finally", + "float", + "for", + "if", + "goto", + "implements", + "import", + "instanceof", + "int", + "interface", + "long", + "native", + "new", + "package", + "private", + "protected", + "public", + "return", + "short", + "static", + "strictfp", + "super", + "switch", + "synchronized", + "this", + "throw", + "throws", + "transient", + "try", + "void", + "volatile", + "while", ]; -static SYSTEM_MODULES: [&str; 2] = [ - "java", "jdk", -]; +static SYSTEM_MODULES: [&str; 2] = ["java", "jdk"]; pub fn parse_type(parent: &Node, code: &str) -> Option { let kind = parent.kind(); @@ -140,7 +187,8 @@ pub fn parse_type(parent: &Node, code: &str) -> Option { if result.is_empty() { result = code.slice(child.byte_range()).to_string(); } else { - result = result + "." + &*code.slice(child.byte_range()).to_string(); + result = + result + "." 
+ &*code.slice(child.byte_range()).to_string(); } } "scoped_type_identifier" => { @@ -214,7 +262,6 @@ fn parse_function_arg(parent: &Node, code: &str) -> FunctionArg { arg } - impl JavaParser { pub fn new() -> Result { let mut parser = Parser::new(); @@ -242,14 +289,24 @@ impl JavaParser { decl.ast_fields.guid = get_guid(); decl.ast_fields.is_error = info.ast_fields.is_error; - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(name_node) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name_node.byte_range()).to_string(); } if let Some(node) = info.node.child_by_field_name("superclass") { - symbols.extend(self.find_error_usages(&node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); for i in 0..node.child_count() { let child = node.child(i).unwrap(); if let Some(dtype) = parse_type(&child, code) { @@ -258,10 +315,20 @@ impl JavaParser { } } if let Some(node) = info.node.child_by_field_name("interfaces") { - symbols.extend(self.find_error_usages(&node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); for i in 0..node.child_count() { let child = node.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); match child.kind() { "type_list" => { for i in 0..child.child_count() { @@ -277,7 +344,6 @@ impl JavaParser { } if let Some(_) = info.node.child_by_field_name("type_parameters") {} - if let Some(body) = info.node.child_by_field_name("body") { decl.ast_fields.definition_range = body.range(); decl.ast_fields.declaration_range = Range { @@ -297,21 +363,41 @@ impl JavaParser { symbols } - fn parse_variable_definition<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_variable_definition<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut type_ = TypeDef::default(); if let Some(type_node) = info.node.child_by_field_name("type") { - symbols.extend(self.find_error_usages(&type_node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &type_node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(dtype) = parse_type(&type_node, code) { type_ = dtype; } } - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); for i in 0..info.node.child_count() { let child = info.node.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); match child.kind() { "variable_declarator" => { let local_dtype = type_.clone(); @@ -328,8 +414,14 @@ impl JavaParser { decl.ast_fields.name = 
code.slice(name.byte_range()).to_string(); } if let Some(value) = child.child_by_field_name("value") { - symbols.extend(self.find_error_usages(&value, code, &info.ast_fields.file_path, &info.parent_guid)); - decl.type_.inference_info = Some(code.slice(value.byte_range()).to_string()); + symbols.extend(self.find_error_usages( + &value, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); + decl.type_.inference_info = + Some(code.slice(value.byte_range()).to_string()); candidates.push_back(CandidateInfo { ast_fields: decl.ast_fields.clone(), node: value, @@ -337,7 +429,12 @@ impl JavaParser { }); } if let Some(dimensions) = child.child_by_field_name("dimensions") { - symbols.extend(self.find_error_usages(&dimensions, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &dimensions, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); decl.type_ = TypeDef { name: Some(code.slice(dimensions.byte_range()).to_string()), inference_info: None, @@ -359,17 +456,32 @@ impl JavaParser { symbols } - fn parse_field_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_field_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut dtype = TypeDef::default(); if let Some(type_node) = info.node.child_by_field_name("type") { - symbols.extend(self.find_error_usages(&type_node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &type_node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(type_) = parse_type(&type_node, code) { dtype = type_; } } - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); for i in 0..info.node.child_count() { let child = info.node.child(i).unwrap(); @@ -389,8 +501,14 @@ impl JavaParser { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); } if let Some(value) = child.child_by_field_name("value") { - symbols.extend(self.find_error_usages(&value, code, &info.ast_fields.file_path, &info.parent_guid)); - decl.type_.inference_info = Some(code.slice(value.byte_range()).to_string()); + symbols.extend(self.find_error_usages( + &value, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); + decl.type_.inference_info = + Some(code.slice(value.byte_range()).to_string()); candidates.push_back(CandidateInfo { ast_fields: info.ast_fields.clone(), node: value, @@ -398,7 +516,12 @@ impl JavaParser { }); } if let Some(dimensions) = child.child_by_field_name("dimensions") { - symbols.extend(self.find_error_usages(&dimensions, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &dimensions, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); decl.type_ = TypeDef { name: Some(code.slice(dimensions.byte_range()).to_string()), inference_info: None, @@ -419,7 +542,12 @@ impl JavaParser { symbols } - fn parse_enum_field_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_enum_field_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut decl = ClassFieldDeclaration::default(); decl.ast_fields.language = 
info.ast_fields.language; @@ -429,13 +557,23 @@ impl JavaParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); decl.ast_fields.is_error = info.ast_fields.is_error; - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); } if let Some(arguments) = info.node.child_by_field_name("arguments") { - symbols.extend(self.find_error_usages(&arguments, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &arguments, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); decl.type_.inference_info = Some(code.slice(arguments.byte_range()).to_string()); for i in 0..arguments.child_count() { let child = arguments.child(i).unwrap(); @@ -453,20 +591,30 @@ impl JavaParser { symbols } - fn parse_usages_<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_usages_<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let kind = info.node.kind(); #[cfg(test)] #[allow(unused)] - let text = code.slice(info.node.byte_range()); + let text = code.slice(info.node.byte_range()); match kind { - "class_declaration" | "interface_declaration" | "enum_declaration" | "annotation_type_declaration" => { + "class_declaration" + | "interface_declaration" + | "enum_declaration" + | "annotation_type_declaration" => { symbols.extend(self.parse_struct_declaration(info, code, candidates)); } "local_variable_declaration" => { symbols.extend(self.parse_variable_definition(info, code, candidates)); } - "method_declaration" | "annotation_type_element_declaration" | "constructor_declaration" => { + "method_declaration" + | "annotation_type_element_declaration" + | "constructor_declaration" => { symbols.extend(self.parse_function_declaration(info, code, candidates)); } "method_invocation" | "object_creation_expression" => { @@ -573,7 +721,13 @@ impl JavaParser { symbols } - fn find_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn find_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); for i in 0..parent.child_count() { let child = parent.child(i).unwrap(); @@ -584,7 +738,13 @@ impl JavaParser { symbols } - fn parse_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn parse_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); match parent.kind() { "identifier" => { @@ -633,7 +793,12 @@ impl JavaParser { symbols } - pub fn parse_function_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + pub fn parse_function_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionDeclaration::default(); decl.ast_fields.language = info.ast_fields.language; @@ -645,14 +810,24 @@ impl JavaParser { decl.ast_fields.is_error = info.ast_fields.is_error; 
decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(name_node) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name_node.byte_range()).to_string(); } if let Some(parameters_node) = info.node.child_by_field_name("parameters") { - symbols.extend(self.find_error_usages(¶meters_node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + ¶meters_node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); decl.ast_fields.declaration_range = Range { start_byte: decl.ast_fields.full_range.start_byte, end_byte: parameters_node.end_byte(), @@ -664,14 +839,24 @@ impl JavaParser { let mut function_args = vec![]; for idx in 0..params_len { let child = parameters_node.child(idx).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); function_args.push(parse_function_arg(&child, code)); } decl.args = function_args; } if let Some(return_type) = info.node.child_by_field_name("type") { decl.return_type = parse_type(&return_type, code); - symbols.extend(self.find_error_usages(&return_type, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &return_type, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); } if let Some(body_node) = info.node.child_by_field_name("body") { @@ -695,7 +880,12 @@ impl JavaParser { symbols } - pub fn parse_call_expression<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + pub fn parse_call_expression<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionCall::default(); decl.ast_fields.language = info.ast_fields.language; @@ -709,14 +899,24 @@ impl JavaParser { } decl.ast_fields.caller_guid = Some(get_guid()); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); } if let Some(type_) = info.node.child_by_field_name("type") { - symbols.extend(self.find_error_usages(&type_, code, &info.ast_fields.file_path, &info.parent_guid)); - if let Some(dtype) = parse_type(&type_, code) { + symbols.extend(self.find_error_usages( + &type_, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); + if let Some(dtype) = parse_type(&type_, code) { if let Some(name) = dtype.name { decl.ast_fields.name = name; } else { @@ -727,8 +927,12 @@ impl JavaParser { } } if let Some(arguments) = info.node.child_by_field_name("arguments") { - symbols.extend(self.find_error_usages(&arguments, code, &info.ast_fields.file_path, - &info.parent_guid)); + symbols.extend(self.find_error_usages( + &arguments, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); let mut new_ast_fields = info.ast_fields.clone(); new_ast_fields.caller_guid = None; for i in 0..arguments.child_count() { @@ -768,8 
+972,10 @@ impl JavaParser { let symbols_l = self.parse_usages_(&candidate, code, &mut candidates); symbols.extend(symbols_l); } - let guid_to_symbol_map = symbols.iter() - .map(|s| (s.clone().read().guid().clone(), s.clone())).collect::>(); + let guid_to_symbol_map = symbols + .iter() + .map(|s| (s.clone().read().guid().clone(), s.clone())) + .collect::>(); for symbol in symbols.iter_mut() { let guid = symbol.read().guid().clone(); if let Some(parent_guid) = symbol.read().parent_guid() { @@ -782,10 +988,20 @@ impl JavaParser { #[cfg(test)] for symbol in symbols.iter_mut() { let mut sym = symbol.write(); - sym.fields_mut().childs_guid = sym.fields_mut().childs_guid.iter() + sym.fields_mut().childs_guid = sym + .fields_mut() + .childs_guid + .iter() .sorted_by_key(|x| { - guid_to_symbol_map.get(*x).unwrap().read().full_range().start_byte - }).map(|x| x.clone()).collect(); + guid_to_symbol_map + .get(*x) + .unwrap() + .read() + .full_range() + .start_byte + }) + .map(|x| x.clone()) + .collect(); } symbols diff --git a/refact-agent/engine/src/ast/treesitter/parsers/js.rs b/refact-agent/engine/src/ast/treesitter/parsers/js.rs index b3482f677..982dbc3eb 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/js.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/js.rs @@ -8,7 +8,11 @@ use similar::DiffableStr; use tree_sitter::{Node, Parser, Range}; use uuid::Uuid; -use crate::ast::treesitter::ast_instance_structs::{AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, VariableDefinition, VariableUsage}; +use crate::ast::treesitter::ast_instance_structs::{ + AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, + FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, + VariableDefinition, VariableUsage, +}; use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::{AstLanguageParser, internal_error, ParserError}; use crate::ast::treesitter::parsers::utils::{CandidateInfo, get_guid}; @@ -23,29 +27,25 @@ fn parse_type_from_value(parent: &Node, code: &str) -> Option { let kind = parent.kind(); let text = code.slice(parent.byte_range()).to_string(); return match kind { - "number" | "null" | "string" | "true" | "false" | "undefined" => { - Some(TypeDef { - name: None, - inference_info: Some(text), - inference_info_guid: None, - is_pod: true, - namespace: "".to_string(), - guid: None, - nested_types: vec![], - }) - } - &_ => { - Some(TypeDef { - name: None, - inference_info: Some(text), - inference_info_guid: None, - is_pod: false, - namespace: "".to_string(), - guid: None, - nested_types: vec![], - }) - } - } + "number" | "null" | "string" | "true" | "false" | "undefined" => Some(TypeDef { + name: None, + inference_info: Some(text), + inference_info_guid: None, + is_pod: true, + namespace: "".to_string(), + guid: None, + nested_types: vec![], + }), + &_ => Some(TypeDef { + name: None, + inference_info: Some(text), + inference_info_guid: None, + is_pod: false, + namespace: "".to_string(), + guid: None, + nested_types: vec![], + }), + }; } fn parse_type(parent: &Node, code: &str) -> Option { @@ -151,8 +151,8 @@ impl JSParser { info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>, - name_from_var: Option) - -> Vec { + name_from_var: Option, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = StructDeclaration::default(); @@ 
-163,7 +163,12 @@ impl JSParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); @@ -176,12 +181,21 @@ impl JSParser { // find base classes for i in 0..info.node.child_count() { let class_heritage = info.node.child(i).unwrap(); - symbols.extend(self.find_error_usages(&class_heritage, code, &info.ast_fields.file_path, - &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &class_heritage, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if class_heritage.kind() == "class_heritage" { for i in 0..class_heritage.child_count() { let extends_clause = class_heritage.child(i).unwrap(); - symbols.extend(self.find_error_usages(&extends_clause, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &extends_clause, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(dtype) = parse_type(&extends_clause, code) { decl.inherited_types.push(dtype); } @@ -230,9 +244,19 @@ impl JSParser { symbols } - fn parse_variable_definition<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_variable_definition<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); let mut decl = VariableDefinition::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -264,7 +288,12 @@ impl JSParser { symbols } - fn parse_field_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_field_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut decl = ClassFieldDeclaration::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -299,11 +328,10 @@ impl JSParser { pub fn parse_function_declaration<'a>( &mut self, info: &CandidateInfo<'a>, - code: &str, candidates: - &mut VecDeque>, + code: &str, + candidates: &mut VecDeque>, name_from_var: Option, - ) - -> Vec { + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionDeclaration::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -313,7 +341,12 @@ impl JSParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); @@ -330,10 +363,20 @@ impl JSParser { start_point: decl.ast_fields.full_range.start_point, end_point: parameters.end_position(), }; - 
symbols.extend(self.find_error_usages(¶meters, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + ¶meters, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); for i in 0..parameters.child_count() { let child = parameters.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); let kind = child.kind(); match kind { "identifier" => { @@ -388,8 +431,8 @@ impl JSParser { &mut self, info: &CandidateInfo<'a>, code: &str, - candidates: &mut VecDeque>) - -> Vec { + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionCall::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -401,7 +444,12 @@ impl JSParser { } decl.ast_fields.caller_guid = Some(get_guid()); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(function) = info.node.child_by_field_name("function") { let kind = function.kind(); @@ -460,7 +508,13 @@ impl JSParser { symbols } - fn find_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn find_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); for i in 0..parent.child_count() { let child = parent.child(i).unwrap(); @@ -471,7 +525,13 @@ impl JSParser { symbols } - fn parse_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn parse_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); match parent.kind() { "identifier" /*| "field_identifier"*/ => { @@ -519,7 +579,12 @@ impl JSParser { symbols } - fn parse_usages_<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_usages_<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let kind = info.node.kind(); @@ -759,8 +824,10 @@ impl JSParser { symbols.extend(symbols_l); } - let guid_to_symbol_map = symbols.iter() - .map(|s| (s.clone().read().guid().clone(), s.clone())).collect::>(); + let guid_to_symbol_map = symbols + .iter() + .map(|s| (s.clone().read().guid().clone(), s.clone())) + .collect::>(); for symbol in symbols.iter_mut() { let guid = symbol.read().guid().clone(); if let Some(parent_guid) = symbol.read().parent_guid() { @@ -775,10 +842,20 @@ impl JSParser { use itertools::Itertools; for symbol in symbols.iter_mut() { let mut sym = symbol.write(); - sym.fields_mut().childs_guid = sym.fields_mut().childs_guid.iter() + sym.fields_mut().childs_guid = sym + .fields_mut() + .childs_guid + .iter() .sorted_by_key(|x| { - guid_to_symbol_map.get(*x).unwrap().read().full_range().start_byte - }).map(|x| x.clone()).collect(); + guid_to_symbol_map + .get(*x) + .unwrap() + .read() + .full_range() + .start_byte + }) + .map(|x| x.clone()) + .collect(); } } @@ -793,5 +870,3 @@ impl AstLanguageParser for JSParser { symbols } } - - diff --git a/refact-agent/engine/src/ast/treesitter/parsers/kotlin.rs 
b/refact-agent/engine/src/ast/treesitter/parsers/kotlin.rs index b29752c92..0dacda858 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/kotlin.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/kotlin.rs @@ -11,7 +11,11 @@ use similar::DiffableStr; use tree_sitter::{Node, Parser, Range}; use uuid::Uuid; -use crate::ast::treesitter::ast_instance_structs::{AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, VariableDefinition, VariableUsage}; +use crate::ast::treesitter::ast_instance_structs::{ + AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, + FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, + VariableDefinition, VariableUsage, +}; use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::{AstLanguageParser, internal_error, ParserError}; use crate::ast::treesitter::parsers::utils::{CandidateInfo, get_guid}; @@ -21,22 +25,78 @@ pub(crate) struct KotlinParser { } static KOTLIN_KEYWORDS: [&str; 64] = [ - "abstract", "actual", "annotation", "as", "break", "by", "catch", "class", "companion", "const", - "constructor", "continue", "crossinline", "data", "do", "dynamic", "else", "enum", "expect", "external", - "final", "finally", "for", "fun", "get", "if", "import", "in", "infix", "init", "inline", "inner", - "interface", "internal", "is", "lateinit", "noinline", "object", "open", "operator", "out", "override", - "package", "private", "protected", "public", "reified", "return", "sealed", "set", "super", "suspend", - "tailrec", "this", "throw", "try", "typealias", "typeof", "val", "var", "vararg", "when", "where", "while" + "abstract", + "actual", + "annotation", + "as", + "break", + "by", + "catch", + "class", + "companion", + "const", + "constructor", + "continue", + "crossinline", + "data", + "do", + "dynamic", + "else", + "enum", + "expect", + "external", + "final", + "finally", + "for", + "fun", + "get", + "if", + "import", + "in", + "infix", + "init", + "inline", + "inner", + "interface", + "internal", + "is", + "lateinit", + "noinline", + "object", + "open", + "operator", + "out", + "override", + "package", + "private", + "protected", + "public", + "reified", + "return", + "sealed", + "set", + "super", + "suspend", + "tailrec", + "this", + "throw", + "try", + "typealias", + "typeof", + "val", + "var", + "vararg", + "when", + "where", + "while", ]; -static SYSTEM_MODULES: [&str; 2] = [ - "kotlin", "java", -]; +static SYSTEM_MODULES: [&str; 2] = ["kotlin", "java"]; pub fn parse_type(parent: &Node, code: &str) -> Option { let kind = parent.kind(); let text = code.slice(parent.byte_range()).to_string(); - + match kind { "type_identifier" | "identifier" | "user_type" => { return Some(TypeDef { @@ -130,9 +190,9 @@ pub fn parse_type(parent: &Node, code: &str) -> Option { let child = parent.child(i).unwrap(); if child.kind() == "type_identifier" { parts.push(code.slice(child.byte_range()).to_string()); - } - } - + } + } + if !parts.is_empty() { decl.name = Some(parts.join(".")); } @@ -148,7 +208,7 @@ pub fn parse_type(parent: &Node, code: &str) -> Option { guid: None, nested_types: vec![], }; - + if let Some(parameters) = parent.child_by_field_name("parameters") { for i in 0..parameters.child_count() { let child = parameters.child(i).unwrap(); @@ -157,13 +217,13 @@ pub fn parse_type(parent: &Node, code: &str) -> Option { } } } - 
+ if let Some(return_type) = parent.child_by_field_name("return_type") { if let Some(t) = parse_type(&return_type, code) { decl.nested_types.push(t); } } - + return Some(decl); } _ => {} @@ -173,14 +233,14 @@ pub fn parse_type(parent: &Node, code: &str) -> Option { fn parse_function_arg(parent: &Node, code: &str) -> FunctionArg { let mut arg = FunctionArg::default(); - + if let Some(name) = parent.child_by_field_name("name") { arg.name = code.slice(name.byte_range()).to_string(); } if let Some(type_node) = parent.child_by_field_name("type") { if let Some(dtype) = parse_type(&type_node, code) { - arg.type_ = Some(dtype); + arg.type_ = Some(dtype); } } @@ -196,7 +256,12 @@ impl KotlinParser { Ok(KotlinParser { parser }) } - fn parse_class_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_class_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut decl = StructDeclaration::default(); @@ -209,7 +274,12 @@ impl KotlinParser { decl.ast_fields.guid = get_guid(); decl.ast_fields.is_error = info.ast_fields.is_error; - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(name_node) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name_node.byte_range()).to_string(); @@ -224,7 +294,12 @@ impl KotlinParser { } if let Some(node) = info.node.child_by_field_name("supertype") { - symbols.extend(self.find_error_usages(&node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); for i in 0..node.child_count() { let child = node.child(i).unwrap(); if let Some(dtype) = parse_type(&child, code) { @@ -232,12 +307,22 @@ impl KotlinParser { } } } - + if let Some(node) = info.node.child_by_field_name("delegation_specifiers") { - symbols.extend(self.find_error_usages(&node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); for i in 0..node.child_count() { let child = node.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); match child.kind() { "type_list" => { for i in 0..child.child_count() { @@ -251,7 +336,7 @@ impl KotlinParser { } } } - + if let Some(_) = info.node.child_by_field_name("type_parameters") {} if let Some(body) = info.node.child_by_field_name("body") { @@ -296,8 +381,12 @@ impl KotlinParser { } else { for i in 0..info.node.child_count() { let child = info.node.child(i).unwrap(); - if child.kind() == "class_body" || child.kind() == "body" || child.kind() == "members" || - child.kind() == "{" || child.kind().contains("body") { + if child.kind() == "class_body" + || child.kind() == "body" + || child.kind() == "members" + || child.kind() == "{" + || child.kind().contains("body") + { candidates.push_back(CandidateInfo { ast_fields: decl.ast_fields.clone(), node: child, @@ -311,20 +400,30 @@ impl KotlinParser { symbols } - fn parse_function_declaration<'a>(&mut self, info: &CandidateInfo<'a>, 
code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_function_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut decl = FunctionDeclaration::default(); - decl.ast_fields.language = info.ast_fields.language; - decl.ast_fields.full_range = info.node.range(); + decl.ast_fields.language = info.ast_fields.language; + decl.ast_fields.full_range = info.node.range(); decl.ast_fields.declaration_range = info.node.range(); decl.ast_fields.definition_range = info.node.range(); - decl.ast_fields.file_path = info.ast_fields.file_path.clone(); - decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); - decl.ast_fields.guid = get_guid(); - decl.ast_fields.is_error = info.ast_fields.is_error; + decl.ast_fields.file_path = info.ast_fields.file_path.clone(); + decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); + decl.ast_fields.guid = get_guid(); + decl.ast_fields.is_error = info.ast_fields.is_error; - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(name_node) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name_node.byte_range()).to_string(); @@ -339,7 +438,12 @@ impl KotlinParser { } if let Some(parameters_node) = info.node.child_by_field_name("parameters") { - symbols.extend(self.find_error_usages(¶meters_node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + ¶meters_node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); decl.ast_fields.declaration_range = Range { start_byte: decl.ast_fields.full_range.start_byte, end_byte: parameters_node.end_byte(), @@ -350,7 +454,12 @@ impl KotlinParser { let mut function_args = vec![]; for i in 0..parameters_node.child_count() { let child = parameters_node.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if child.kind() == "parameter" { function_args.push(parse_function_arg(&child, code)); } @@ -360,7 +469,12 @@ impl KotlinParser { if let Some(return_type) = info.node.child_by_field_name("type") { decl.return_type = parse_type(&return_type, code); - symbols.extend(self.find_error_usages(&return_type, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &return_type, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); } if let Some(body_node) = info.node.child_by_field_name("body") { @@ -371,7 +485,7 @@ impl KotlinParser { start_point: decl.ast_fields.full_range.start_point, end_point: decl.ast_fields.definition_range.start_point, }; - + for i in 0..body_node.child_count() { let child = body_node.child(i).unwrap(); candidates.push_back(CandidateInfo { @@ -398,25 +512,30 @@ impl KotlinParser { symbols } - fn parse_property_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_property_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; - + let mut decl = ClassFieldDeclaration::default(); - decl.ast_fields.language = info.ast_fields.language; - 
decl.ast_fields.full_range = info.node.range(); - decl.ast_fields.declaration_range = info.node.range(); - decl.ast_fields.file_path = info.ast_fields.file_path.clone(); - decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); - decl.ast_fields.guid = get_guid(); - decl.ast_fields.is_error = info.ast_fields.is_error; + decl.ast_fields.language = info.ast_fields.language; + decl.ast_fields.full_range = info.node.range(); + decl.ast_fields.declaration_range = info.node.range(); + decl.ast_fields.file_path = info.ast_fields.file_path.clone(); + decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); + decl.ast_fields.guid = get_guid(); + decl.ast_fields.is_error = info.ast_fields.is_error; if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); } else { for i in 0..info.node.child_count() { let child = info.node.child(i).unwrap(); - + if child.kind() == "variable_declaration" { for j in 0..child.child_count() { let subchild = child.child(j).unwrap(); @@ -442,13 +561,16 @@ impl KotlinParser { } else { for i in 0..info.node.child_count() { let child = info.node.child(i).unwrap(); - + if child.kind() == "variable_declaration" { for j in 0..child.child_count() { let subchild = child.child(j).unwrap(); - if subchild.kind() == "function_type" || subchild.kind() == "type_identifier" || - subchild.kind() == "nullable_type" || subchild.kind() == "generic_type" || - subchild.kind() == "user_type" { + if subchild.kind() == "function_type" + || subchild.kind() == "type_identifier" + || subchild.kind() == "nullable_type" + || subchild.kind() == "generic_type" + || subchild.kind() == "user_type" + { if let Some(dtype) = parse_type(&subchild, code) { decl.type_ = dtype; break; @@ -458,9 +580,12 @@ impl KotlinParser { if decl.type_.name.is_some() { break; } - } else if child.kind() == "function_type" || child.kind() == "type_identifier" || - child.kind() == "nullable_type" || child.kind() == "generic_type" || - child.kind() == "user_type" { + } else if child.kind() == "function_type" + || child.kind() == "type_identifier" + || child.kind() == "nullable_type" + || child.kind() == "generic_type" + || child.kind() == "user_type" + { if let Some(dtype) = parse_type(&child, code) { decl.type_ = dtype; break; @@ -471,11 +596,11 @@ impl KotlinParser { if let Some(initializer) = info.node.child_by_field_name("initializer") { decl.type_.inference_info = Some(code.slice(initializer.byte_range()).to_string()); - + for i in 0..initializer.child_count() { let child = initializer.child(i).unwrap(); if child.kind() == "lambda_literal" || child.kind() == "lambda_expression" { - candidates.push_back(CandidateInfo { + candidates.push_back(CandidateInfo { ast_fields: { let mut ast_fields = AstSymbolFields::default(); ast_fields.language = info.ast_fields.language; @@ -522,7 +647,12 @@ impl KotlinParser { symbols } - fn parse_variable_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, _candidates: &mut VecDeque>) -> Vec { + fn parse_variable_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + _candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut type_ = TypeDef::default(); @@ -537,20 +667,21 @@ impl KotlinParser { match child.kind() { "variable_declarator" => { let mut decl = VariableDefinition::default(); - decl.ast_fields.language = info.ast_fields.language; - decl.ast_fields.full_range = info.node.range(); - decl.ast_fields.file_path = info.ast_fields.file_path.clone(); - 
decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); - decl.ast_fields.guid = get_guid(); - decl.ast_fields.is_error = info.ast_fields.is_error; + decl.ast_fields.language = info.ast_fields.language; + decl.ast_fields.full_range = info.node.range(); + decl.ast_fields.file_path = info.ast_fields.file_path.clone(); + decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); + decl.ast_fields.guid = get_guid(); + decl.ast_fields.is_error = info.ast_fields.is_error; decl.type_ = type_.clone(); if let Some(name) = child.child_by_field_name("name") { - decl.ast_fields.name = code.slice(name.byte_range()).to_string(); - } + decl.ast_fields.name = code.slice(name.byte_range()).to_string(); + } if let Some(value) = child.child_by_field_name("value") { - decl.type_.inference_info = Some(code.slice(value.byte_range()).to_string()); + decl.type_.inference_info = + Some(code.slice(value.byte_range()).to_string()); } symbols.push(Arc::new(RwLock::new(Box::new(decl)))); @@ -562,10 +693,15 @@ impl KotlinParser { symbols } - fn parse_identifier<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, _candidates: &mut VecDeque>) -> Vec { + fn parse_identifier<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + _candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let name = code.slice(info.node.byte_range()).to_string(); - + if KOTLIN_KEYWORDS.contains(&name.as_str()) { return symbols; } @@ -586,7 +722,12 @@ impl KotlinParser { symbols } - fn parse_call_expression<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_call_expression<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut decl = FunctionCall::default(); @@ -601,13 +742,23 @@ impl KotlinParser { } decl.ast_fields.caller_guid = Some(get_guid()); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); } if let Some(type_) = info.node.child_by_field_name("type") { - symbols.extend(self.find_error_usages(&type_, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &type_, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(dtype) = parse_type(&type_, code) { if let Some(name) = dtype.name { decl.ast_fields.name = name; @@ -619,83 +770,106 @@ impl KotlinParser { } } if let Some(arguments) = info.node.child_by_field_name("arguments") { - symbols.extend(self.find_error_usages(&arguments, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &arguments, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); let mut new_ast_fields = info.ast_fields.clone(); new_ast_fields.caller_guid = None; for i in 0..arguments.child_count() { let child = arguments.child(i).unwrap(); - candidates.push_back(CandidateInfo { + candidates.push_back(CandidateInfo { ast_fields: new_ast_fields.clone(), - node: child, - parent_guid: info.parent_guid.clone(), - }); - } + node: child, + parent_guid: info.parent_guid.clone(), + }); } + } if let Some(object) = info.node.child_by_field_name("receiver") { - candidates.push_back(CandidateInfo { + candidates.push_back(CandidateInfo { ast_fields: 
decl.ast_fields.clone(), node: object, - parent_guid: info.parent_guid.clone(), - }); - } + parent_guid: info.parent_guid.clone(), + }); + } symbols.push(Arc::new(RwLock::new(Box::new(decl)))); symbols - } + } - fn parse_annotation<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, _candidates: &mut VecDeque>) -> Vec { + fn parse_annotation<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + _candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; - let mut usage = VariableUsage::default(); - - usage.ast_fields.name = code.slice(info.node.byte_range()).to_string(); - usage.ast_fields.language = info.ast_fields.language; - usage.ast_fields.full_range = info.node.range(); - usage.ast_fields.file_path = info.ast_fields.file_path.clone(); - usage.ast_fields.parent_guid = Some(info.parent_guid.clone()); - usage.ast_fields.guid = get_guid(); - usage.ast_fields.is_error = info.ast_fields.is_error; - + let mut usage = VariableUsage::default(); + + usage.ast_fields.name = code.slice(info.node.byte_range()).to_string(); + usage.ast_fields.language = info.ast_fields.language; + usage.ast_fields.full_range = info.node.range(); + usage.ast_fields.file_path = info.ast_fields.file_path.clone(); + usage.ast_fields.parent_guid = Some(info.parent_guid.clone()); + usage.ast_fields.guid = get_guid(); + usage.ast_fields.is_error = info.ast_fields.is_error; + if usage.ast_fields.name.starts_with('@') { usage.ast_fields.name = usage.ast_fields.name[1..].to_string(); } - - symbols.push(Arc::new(RwLock::new(Box::new(usage)))); + + symbols.push(Arc::new(RwLock::new(Box::new(usage)))); symbols - } + } - fn parse_field_access<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_field_access<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; - - if let (Some(object), Some(field)) = (info.node.child_by_field_name("receiver"), info.node.child_by_field_name("field")) { - let mut usage = VariableUsage::default(); - usage.ast_fields.name = code.slice(field.byte_range()).to_string(); - usage.ast_fields.language = info.ast_fields.language; - usage.ast_fields.full_range = info.node.range(); - usage.ast_fields.file_path = info.ast_fields.file_path.clone(); - usage.ast_fields.guid = get_guid(); - usage.ast_fields.parent_guid = Some(info.parent_guid.clone()); - usage.ast_fields.caller_guid = Some(get_guid()); - if let Some(caller_guid) = info.ast_fields.caller_guid.clone() { - usage.ast_fields.guid = caller_guid; - } - candidates.push_back(CandidateInfo { - ast_fields: usage.ast_fields.clone(), - node: object, - parent_guid: info.parent_guid.clone(), - }); - symbols.push(Arc::new(RwLock::new(Box::new(usage)))); - } - + + if let (Some(object), Some(field)) = ( + info.node.child_by_field_name("receiver"), + info.node.child_by_field_name("field"), + ) { + let mut usage = VariableUsage::default(); + usage.ast_fields.name = code.slice(field.byte_range()).to_string(); + usage.ast_fields.language = info.ast_fields.language; + usage.ast_fields.full_range = info.node.range(); + usage.ast_fields.file_path = info.ast_fields.file_path.clone(); + usage.ast_fields.guid = get_guid(); + usage.ast_fields.parent_guid = Some(info.parent_guid.clone()); + usage.ast_fields.caller_guid = Some(get_guid()); + if let Some(caller_guid) = info.ast_fields.caller_guid.clone() { + usage.ast_fields.guid = caller_guid; + } + candidates.push_back(CandidateInfo { + ast_fields: usage.ast_fields.clone(), + 
node: object, + parent_guid: info.parent_guid.clone(), + }); + symbols.push(Arc::new(RwLock::new(Box::new(usage)))); + } + symbols } - fn parse_lambda_expression<'a>(&mut self, info: &CandidateInfo<'a>, _code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_lambda_expression<'a>( + &mut self, + info: &CandidateInfo<'a>, + _code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let symbols: Vec = vec![]; - + if let Some(parameters) = info.node.child_by_field_name("parameters") { for i in 0..parameters.child_count() { let child = parameters.child(i).unwrap(); - candidates.push_back(CandidateInfo { + candidates.push_back(CandidateInfo { ast_fields: { let mut ast_fields = AstSymbolFields::default(); ast_fields.language = info.ast_fields.language; @@ -707,16 +881,16 @@ impl KotlinParser { ast_fields.caller_guid = None; ast_fields }, - node: child, - parent_guid: info.parent_guid.clone(), - }); + node: child, + parent_guid: info.parent_guid.clone(), + }); } } - + if let Some(body) = info.node.child_by_field_name("body") { for i in 0..body.child_count() { let child = body.child(i).unwrap(); - candidates.push_back(CandidateInfo { + candidates.push_back(CandidateInfo { ast_fields: { let mut ast_fields = AstSymbolFields::default(); ast_fields.language = info.ast_fields.language; @@ -728,16 +902,22 @@ impl KotlinParser { ast_fields.caller_guid = None; ast_fields }, - node: child, - parent_guid: info.parent_guid.clone(), + node: child, + parent_guid: info.parent_guid.clone(), }); - } } - + } + symbols } - fn find_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn find_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = vec![]; for i in 0..parent.child_count() { let child = parent.child(i).unwrap(); @@ -748,7 +928,13 @@ impl KotlinParser { symbols } - fn parse_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn parse_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = vec![]; match parent.kind() { "identifier" => { @@ -768,7 +954,10 @@ impl KotlinParser { symbols.push(Arc::new(RwLock::new(Box::new(usage)))); } "field_access" | "navigation_expression" => { - if let (Some(object), Some(field)) = (parent.child_by_field_name("receiver"), parent.child_by_field_name("field")) { + if let (Some(object), Some(field)) = ( + parent.child_by_field_name("receiver"), + parent.child_by_field_name("field"), + ) { let usages = self.parse_error_usages(&object, code, path, parent_guid); let mut usage = VariableUsage::default(); usage.ast_fields.name = code.slice(field.byte_range()).to_string(); @@ -796,22 +985,44 @@ impl KotlinParser { symbols } - fn parse_usages_<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_usages_<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let kind = info.node.kind(); - - + match kind { - "class_declaration" | "interface_declaration" | "enum_declaration" | "object_declaration" => { - self.parse_class_declaration(info, code, candidates) - } - "function_declaration" | "fun" | "method_declaration" | "method" | "constructor" | "init" | "getter" | "setter" | - "function" | "member_function" | "class_function" | "method_definition" | "function_definition" => { - self.parse_function_declaration(info, code, candidates) - } - 
"property_declaration" | "val" | "var" | "property" | "mutable_property" | "immutable_property" | "lateinit" | - "val_declaration" | "var_declaration" | "const_declaration" | "member_property" | "class_property" => { - self.parse_property_declaration(info, code, candidates) - } + "class_declaration" + | "interface_declaration" + | "enum_declaration" + | "object_declaration" => self.parse_class_declaration(info, code, candidates), + "function_declaration" + | "fun" + | "method_declaration" + | "method" + | "constructor" + | "init" + | "getter" + | "setter" + | "function" + | "member_function" + | "class_function" + | "method_definition" + | "function_definition" => self.parse_function_declaration(info, code, candidates), + "property_declaration" + | "val" + | "var" + | "property" + | "mutable_property" + | "immutable_property" + | "lateinit" + | "val_declaration" + | "var_declaration" + | "const_declaration" + | "member_property" + | "class_property" => self.parse_property_declaration(info, code, candidates), "companion_object" => { let symbols: Vec = vec![]; for i in 0..info.node.child_count() { @@ -843,15 +1054,11 @@ impl KotlinParser { "lambda_literal" | "lambda_expression" => { self.parse_lambda_expression(info, code, candidates) } - "identifier" => { - self.parse_identifier(info, code, candidates) - } + "identifier" => self.parse_identifier(info, code, candidates), "field_access" | "navigation_expression" => { self.parse_field_access(info, code, candidates) } - "annotation" => { - self.parse_annotation(info, code, candidates) - } + "annotation" => self.parse_annotation(info, code, candidates), "import_declaration" => { let mut symbols: Vec = vec![]; let mut def = ImportDeclaration::default(); @@ -860,7 +1067,7 @@ impl KotlinParser { def.ast_fields.file_path = info.ast_fields.file_path.clone(); def.ast_fields.parent_guid = Some(info.parent_guid.clone()); def.ast_fields.guid = get_guid(); - + for i in 0..info.node.child_count() { let child = info.node.child(i).unwrap(); if ["scoped_identifier", "identifier"].contains(&child.kind()) { @@ -873,10 +1080,10 @@ impl KotlinParser { } } } - + symbols.push(Arc::new(RwLock::new(Box::new(def)))); - symbols - } + symbols + } "block_comment" | "line_comment" => { let mut symbols: Vec = vec![]; let mut def = CommentDefinition::default(); @@ -911,7 +1118,7 @@ impl KotlinParser { let symbols: Vec = vec![]; for i in 0..info.node.child_count() { let child = info.node.child(i).unwrap(); - candidates.push_back(CandidateInfo { + candidates.push_back(CandidateInfo { ast_fields: { let mut ast_fields = AstSymbolFields::default(); ast_fields.language = info.ast_fields.language; @@ -923,17 +1130,17 @@ impl KotlinParser { ast_fields.caller_guid = None; ast_fields }, - node: child, - parent_guid: info.parent_guid.clone(), - }); - } + node: child, + parent_guid: info.parent_guid.clone(), + }); + } symbols } _ => { let symbols: Vec = vec![]; for i in 0..info.node.child_count() { let child = info.node.child(i).unwrap(); - candidates.push_back(CandidateInfo { + candidates.push_back(CandidateInfo { ast_fields: { let mut ast_fields = AstSymbolFields::default(); ast_fields.language = info.ast_fields.language; @@ -946,10 +1153,10 @@ impl KotlinParser { ast_fields }, node: child, - parent_guid: info.parent_guid.clone(), - }); - } - symbols + parent_guid: info.parent_guid.clone(), + }); + } + symbols } } } @@ -972,7 +1179,8 @@ impl KotlinParser { symbols.extend(symbols_l); } - let guid_to_symbol_map: HashMap = symbols.iter() + let guid_to_symbol_map: HashMap = symbols + 
.iter() .map(|s| (s.read().guid().clone(), s.clone())) .collect(); @@ -988,10 +1196,20 @@ impl KotlinParser { #[cfg(test)] for symbol in symbols.iter_mut() { let mut sym = symbol.write(); - sym.fields_mut().childs_guid = sym.fields_mut().childs_guid.iter() + sym.fields_mut().childs_guid = sym + .fields_mut() + .childs_guid + .iter() .sorted_by_key(|x| { - guid_to_symbol_map.get(*x).unwrap().read().full_range().start_byte - }).map(|x| x.clone()).collect(); + guid_to_symbol_map + .get(*x) + .unwrap() + .read() + .full_range() + .start_byte + }) + .map(|x| x.clone()) + .collect(); } symbols @@ -1004,4 +1222,4 @@ impl AstLanguageParser for KotlinParser { let symbols = self.parse_(&tree.root_node(), code, path); symbols } -} \ No newline at end of file +} diff --git a/refact-agent/engine/src/ast/treesitter/parsers/python.rs b/refact-agent/engine/src/ast/treesitter/parsers/python.rs index 75d4f364e..99a584c47 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/python.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/python.rs @@ -10,7 +10,11 @@ use similar::DiffableStr; use tree_sitter::{Node, Parser, Point, Range}; use uuid::Uuid; -use crate::ast::treesitter::ast_instance_structs::{AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, SymbolInformation, TypeDef, VariableDefinition, VariableUsage}; +use crate::ast::treesitter::ast_instance_structs::{ + AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, + FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, + SymbolInformation, TypeDef, VariableDefinition, VariableUsage, +}; use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::{AstLanguageParser, internal_error, ParserError}; use crate::ast::treesitter::parsers::utils::{CandidateInfo, get_children_guids, get_guid}; @@ -18,32 +22,211 @@ use crate::ast::treesitter::skeletonizer::SkeletonFormatter; use crate::ast::treesitter::structs::SymbolType; static PYTHON_MODULES: [&str; 203] = [ - "abc", "aifc", "argparse", "array", "asynchat", "asyncio", "asyncore", "atexit", "audioop", - "base64", "bdb", "binascii", "binhex", "bisect", "builtins", "bz2", "calendar", "cgi", "cgitb", - "chunk", "cmath", "cmd", "code", "codecs", "codeop", "collections", "colorsys", "compileall", - "concurrent", "configparser", "contextlib", "contextvars", "copy", "copyreg", "crypt", "csv", - "ctypes", "curses", "datetime", "dbm", "decimal", "difflib", "dis", "distutils", "doctest", - "email", "encodings", "ensurepip", "enum", "errno", "faulthandler", "fcntl", "filecmp", - "fileinput", "fnmatch", "formatter", "fractions", "ftplib", "functools", "gc", "getopt", - "getpass", "gettext", "glob", "grp", "gzip", "hashlib", "heapq", "hmac", "html", "http", - "idlelib", "imaplib", "imghdr", "imp", "importlib", "inspect", "io", "ipaddress", "itertools", - "json", "keyword", "lib2to3", "linecache", "locale", "logging", "lzma", "macpath", "mailbox", - "mailcap", "marshal", "math", "mimetypes", "mmap", "modulefinder", "msilib", "msvcrt", - "multiprocessing", "netrc", "nntplib", "numbers", "operator", "optparse", "os", "ossaudiodev", - "parser", "pathlib", "pdb", "pickle", "pickletools", "pipes", "pkgutil", "platform", "plistlib", - "poplib", "posix", "pprint", "profile", "pstats", "pty", "pwd", "py_compile", "pyclbr", "pydoc", - "queue", "quopri", "random", "re", "readline", "reprlib", 
"resource", "rlcompleter", "runpy", - "sched", "secrets", "select", "selectors", "shelve", "shlex", "shutil", "signal", "site", "smtpd", - "smtplib", "sndhdr", "socket", "socketserver", "spwd", "sqlite3", "ssl", "stat", "statistics", - "string", "stringprep", "struct", "subprocess", "sunau", "symbol", "symtable", "sys", "sysconfig", - "syslog", "tabnanny", "tarfile", "telnetlib", "tempfile", "termios", "test", "textwrap", - "threading", "time", "timeit", "tkinter", "token", "tokenize", "trace", "traceback", - "tracemalloc", "tty", "turtle", "turtledemo", "types", "typing", "unicodedata", "unittest", - "urllib", "uu", "uuid", "venv", "warnings", "wave", "weakref", "webbrowser", "winreg", "winsound", - "wsgiref", "xdrlib", "xml", "xmlrpc", "zipapp", "zipfile", "zipimport", "zoneinfo" + "abc", + "aifc", + "argparse", + "array", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "idlelib", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "macpath", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nntplib", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zoneinfo", ]; - pub(crate) struct PythonParser { pub parser: Parser, } @@ -200,10 +383,10 @@ fn parse_function_arg(parent: &Node, code: &str) -> Vec { const SPECIAL_SYMBOLS: &str = "{}(),.;_|&"; const PYTHON_KEYWORDS: [&'static 
str; 35] = [ - "False", "None", "True", "and", "as", "assert", "async", "await", "break", "class", - "continue", "def", "del", "elif", "else", "except", "finally", "for", "from", "global", - "if", "import", "in", "is", "lambda", "nonlocal", "not", "or", "pass", "raise", - "return", "try", "while", "with", "yield" + "False", "None", "True", "and", "as", "assert", "async", "await", "break", "class", "continue", + "def", "del", "elif", "else", "except", "finally", "for", "from", "global", "if", "import", + "in", "is", "lambda", "nonlocal", "not", "or", "pass", "raise", "return", "try", "while", + "with", "yield", ]; impl PythonParser { @@ -215,7 +398,12 @@ impl PythonParser { Ok(PythonParser { parser }) } - pub fn parse_struct_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + pub fn parse_struct_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = StructDeclaration::default(); @@ -226,7 +414,12 @@ impl PythonParser { decl.ast_fields.guid = get_guid(); decl.ast_fields.is_error = info.ast_fields.is_error; - symbols.extend(self.find_error_usages(&info.node, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(parent_node) = info.node.parent() { if parent_node.kind() == "decorated_definition" { @@ -250,7 +443,12 @@ impl PythonParser { decl.inherited_types.push(dtype); } } - symbols.extend(self.find_error_usages(&superclasses, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &superclasses, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); decl.ast_fields.declaration_range = Range { start_byte: decl.ast_fields.full_range.start_byte, end_byte: superclasses.end_byte(), @@ -273,7 +471,12 @@ impl PythonParser { symbols } - fn parse_assignment<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_assignment<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut is_class_field = false; { let mut parent_mb = info.node.parent(); @@ -293,7 +496,6 @@ impl PythonParser { } } - let mut symbols: Vec = vec![]; if let Some(right) = info.node.child_by_field_name("right") { candidates.push_back(CandidateInfo { @@ -310,10 +512,12 @@ impl PythonParser { }); } - let mut candidates_: VecDeque<(Option, Option, Option)> = VecDeque::from(vec![ - (info.node.child_by_field_name("left"), - info.node.child_by_field_name("type"), - info.node.child_by_field_name("right"))]); + let mut candidates_: VecDeque<(Option, Option, Option)> = + VecDeque::from(vec![( + info.node.child_by_field_name("left"), + info.node.child_by_field_name("type"), + info.node.child_by_field_name("right"), + )]); let mut right_for_all = false; while !candidates_.is_empty() { let (left_mb, type_mb, right_mb) = candidates_.pop_front().unwrap(); @@ -352,9 +556,11 @@ impl PythonParser { } } if let Some(right) = right_mb { - decl.type_.inference_info = Some(code.slice(right.byte_range()).to_string()); - decl.type_.is_pod = vec!["integer", "string", "float", "false", "true"] - .contains(&right.kind()); + decl.type_.inference_info = + Some(code.slice(right.byte_range()).to_string()); + decl.type_.is_pod = + vec!["integer", "string", "float", "false", "true"] + 
.contains(&right.kind()); } symbols.push(Arc::new(RwLock::new(Box::new(decl)))); } @@ -399,7 +605,12 @@ impl PythonParser { symbols } - fn parse_usages_<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_usages_<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let kind = info.node.kind(); let _text = code.slice(info.node.byte_range()); @@ -439,7 +650,8 @@ impl PythonParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); decl.ast_fields.name = text.to_string(); - decl.type_.inference_info = Some(code.slice(value.byte_range()).to_string()); + decl.type_.inference_info = + Some(code.slice(value.byte_range()).to_string()); decl.ast_fields.is_error = info.ast_fields.is_error; symbols.push(Arc::new(RwLock::new(Box::new(decl)))); } @@ -477,7 +689,9 @@ impl PythonParser { let attribute = info.node.child_by_field_name("attribute").unwrap(); let name = code.slice(attribute.byte_range()).to_string(); let mut def = VariableDefinition::default(); - def.type_ = info.node.parent() + def.type_ = info + .node + .parent() .map(|x| x.child_by_field_name("type")) .flatten() .map(|x| parse_type(&x, code)) @@ -543,24 +757,36 @@ impl PythonParser { let base_path = code.slice(module_name.byte_range()).to_string(); if base_path.starts_with("..") { base_path_component.push("..".to_string()); - base_path_component.extend(base_path.slice(2..base_path.len()).split(".") - .map(|x| x.to_string()) - .filter(|x| !x.is_empty()) - .collect::>()); + base_path_component.extend( + base_path + .slice(2..base_path.len()) + .split(".") + .map(|x| x.to_string()) + .filter(|x| !x.is_empty()) + .collect::>(), + ); } else if base_path.starts_with(".") { base_path_component.push(".".to_string()); - base_path_component.extend(base_path.slice(1..base_path.len()).split(".") - .map(|x| x.to_string()) - .filter(|x| !x.is_empty()) - .collect::>()); + base_path_component.extend( + base_path + .slice(1..base_path.len()) + .split(".") + .map(|x| x.to_string()) + .filter(|x| !x.is_empty()) + .collect::>(), + ); } else { - base_path_component = base_path.split(".") + base_path_component = base_path + .split(".") .map(|x| x.to_string()) .filter(|x| !x.is_empty()) .collect(); } } else { - base_path_component = code.slice(module_name.byte_range()).to_string().split(".") + base_path_component = code + .slice(module_name.byte_range()) + .to_string() + .split(".") .map(|x| x.to_string()) .filter(|x| !x.is_empty()) .collect(); @@ -577,11 +803,21 @@ impl PythonParser { let mut alias: Option = None; match child.kind() { "dotted_name" => { - path_components = code.slice(child.byte_range()).to_string().split(".").map(|x| x.to_string()).collect(); + path_components = code + .slice(child.byte_range()) + .to_string() + .split(".") + .map(|x| x.to_string()) + .collect(); } "aliased_import" => { if let Some(name) = child.child_by_field_name("name") { - path_components = code.slice(name.byte_range()).to_string().split(".").map(|x| x.to_string()).collect(); + path_components = code + .slice(name.byte_range()) + .to_string() + .split(".") + .map(|x| x.to_string()) + .collect(); } if let Some(alias_node) = child.child_by_field_name("alias") { alias = Some(code.slice(alias_node.byte_range()).to_string()); @@ -597,7 +833,8 @@ impl PythonParser { def_local.import_type = ImportType::UserModule; } } - def_local.ast_fields.name = def_local.path_components.last().unwrap().to_string(); + 
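The import handling reformatted above splits a dotted module path into components while preserving a leading `.` or `..` marker for relative imports. A small, self-contained sketch of that splitting logic (illustrative only, not the patch's code):

```rust
// Split a Python-style module path, keeping "." / ".." as a relative marker.
fn split_module_path(base_path: &str) -> Vec<String> {
    let mut components = Vec::new();
    let rest = if let Some(r) = base_path.strip_prefix("..") {
        components.push("..".to_string());
        r
    } else if let Some(r) = base_path.strip_prefix('.') {
        components.push(".".to_string());
        r
    } else {
        base_path
    };
    components.extend(
        rest.split('.')
            .filter(|s| !s.is_empty())
            .map(|s| s.to_string()),
    );
    components
}

fn main() {
    assert_eq!(split_module_path("os.path"), vec!["os", "path"]);
    assert_eq!(split_module_path("..pkg.util"), vec!["..", "pkg", "util"]);
    assert_eq!(split_module_path(".sibling"), vec![".", "sibling"]);
    println!("ok");
}
```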
def_local.ast_fields.name = + def_local.path_components.last().unwrap().to_string(); def_local.alias = alias; symbols.push(Arc::new(RwLock::new(Box::new(def_local)))); @@ -608,7 +845,12 @@ impl PythonParser { } } "ERROR" => { - symbols.extend(self.parse_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.parse_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); } _ => { for i in 0..info.node.child_count() { @@ -624,7 +866,12 @@ impl PythonParser { symbols } - pub fn parse_function_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + pub fn parse_function_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionDeclaration::default(); decl.ast_fields.language = info.ast_fields.language; @@ -637,7 +884,12 @@ impl PythonParser { decl.ast_fields.full_range = parent_node.range(); } } - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); let mut decl_end_byte: usize = info.node.end_byte(); let mut decl_end_point: Point = info.node.end_position(); @@ -649,7 +901,12 @@ impl PythonParser { if let Some(parameters_node) = info.node.child_by_field_name("parameters") { decl_end_byte = parameters_node.end_byte(); decl_end_point = parameters_node.end_position(); - symbols.extend(self.find_error_usages(¶meters_node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + ¶meters_node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); let params_len = parameters_node.child_count(); let mut function_args = vec![]; @@ -664,7 +921,12 @@ impl PythonParser { decl.return_type = parse_type(&return_type, code); decl_end_byte = return_type.end_byte(); decl_end_point = return_type.end_position(); - symbols.extend(self.find_error_usages(&return_type, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &return_type, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); } if let Some(body_node) = info.node.child_by_field_name("body") { @@ -689,7 +951,13 @@ impl PythonParser { symbols } - fn find_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn find_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); for i in 0..parent.child_count() { let child = parent.child(i).unwrap(); @@ -700,7 +968,13 @@ impl PythonParser { symbols } - fn parse_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn parse_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); match parent.kind() { "identifier" => { @@ -749,7 +1023,12 @@ impl PythonParser { symbols } - pub fn parse_call_expression<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + pub fn parse_call_expression<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = 
FunctionCall::default(); decl.ast_fields.language = LanguageId::Python; @@ -763,13 +1042,20 @@ impl PythonParser { decl.ast_fields.caller_guid = Some(get_guid()); decl.ast_fields.is_error = info.ast_fields.is_error; - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); let arguments_node = info.node.child_by_field_name("arguments").unwrap(); for i in 0..arguments_node.child_count() { let child = arguments_node.child(i).unwrap(); let text = code.slice(child.byte_range()); - if SPECIAL_SYMBOLS.contains(&text) { continue; } + if SPECIAL_SYMBOLS.contains(&text) { + continue; + } let mut new_ast_fields = info.ast_fields.clone(); new_ast_fields.caller_guid = None; @@ -779,7 +1065,12 @@ impl PythonParser { parent_guid: info.parent_guid.clone(), }); } - symbols.extend(self.find_error_usages(&arguments_node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &arguments_node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); let function_node = info.node.child_by_field_name("function").unwrap(); let text = code.slice(function_node.byte_range()); @@ -828,8 +1119,10 @@ impl PythonParser { let symbols_l = self.parse_usages_(&candidate, code, &mut candidates); symbols.extend(symbols_l); } - let guid_to_symbol_map = symbols.iter() - .map(|s| (s.clone().read().guid().clone(), s.clone())).collect::>(); + let guid_to_symbol_map = symbols + .iter() + .map(|s| (s.clone().read().guid().clone(), s.clone())) + .collect::>(); for symbol in symbols.iter_mut() { let guid = symbol.read().guid().clone(); if let Some(parent_guid) = symbol.read().parent_guid() { @@ -842,10 +1135,20 @@ impl PythonParser { #[cfg(test)] for symbol in symbols.iter_mut() { let mut sym = symbol.write(); - sym.fields_mut().childs_guid = sym.fields_mut().childs_guid.iter() + sym.fields_mut().childs_guid = sym + .fields_mut() + .childs_guid + .iter() .sorted_by_key(|x| { - guid_to_symbol_map.get(*x).unwrap().read().full_range().start_byte - }).map(|x| x.clone()).collect(); + guid_to_symbol_map + .get(*x) + .unwrap() + .read() + .full_range() + .start_byte + }) + .map(|x| x.clone()) + .collect(); } symbols @@ -855,10 +1158,13 @@ impl PythonParser { pub struct PythonSkeletonFormatter; impl SkeletonFormatter for PythonSkeletonFormatter { - fn make_skeleton(&self, symbol: &SymbolInformation, - text: &String, - guid_to_children: &HashMap>, - guid_to_info: &HashMap) -> String { + fn make_skeleton( + &self, + symbol: &SymbolInformation, + text: &String, + guid_to_children: &HashMap>, + guid_to_info: &HashMap, + ) -> String { let mut res_line = symbol.get_declaration_content(text).unwrap(); let children = guid_to_children.get(&symbol.guid).unwrap(); if children.is_empty() { @@ -878,7 +1184,11 @@ impl SkeletonFormatter for PythonSkeletonFormatter { res_line = format!("{} ...\n", res_line); } SymbolType::ClassFieldDeclaration => { - res_line = format!("{} {}\n", res_line, child_symbol.get_content(text).unwrap()); + res_line = format!( + "{} {}\n", + res_line, + child_symbol.get_content(text).unwrap() + ); } _ => {} } @@ -887,27 +1197,36 @@ impl SkeletonFormatter for PythonSkeletonFormatter { res_line } - fn get_declaration_with_comments(&self, - symbol: &SymbolInformation, - text: &String, - guid_to_children: &HashMap>, - guid_to_info: &HashMap) -> (String, (usize, usize)) { + fn 
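The skeleton formatter touched in this area keeps a declaration line, elides function bodies to `...`, and keeps class field declarations verbatim. A toy sketch of that idea with made-up stand-in types (`ToySymbol`, `ToyKind`), assuming a far simpler symbol model than `SymbolInformation`:

```rust
// Render a class "skeleton": declaration line, field lines verbatim, function
// declarations with elided bodies.
struct ToySymbol {
    declaration: String,
    kind: ToyKind,
}

enum ToyKind {
    Function,
    ClassField,
}

fn make_skeleton(class_decl: &str, children: &[ToySymbol]) -> String {
    let mut out = String::from(class_decl);
    out.push('\n');
    for child in children {
        match child.kind {
            // Function bodies are dropped so the skeleton stays compact.
            ToyKind::Function => out.push_str(&format!("    {} ...\n", child.declaration)),
            // Fields are cheap, so they are kept as-is.
            ToyKind::ClassField => out.push_str(&format!("    {}\n", child.declaration)),
        }
    }
    out
}

fn main() {
    let children = vec![
        ToySymbol { declaration: "x: int = 0".into(), kind: ToyKind::ClassField },
        ToySymbol { declaration: "def area(self):".into(), kind: ToyKind::Function },
    ];
    print!("{}", make_skeleton("class Shape:", &children));
}
```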
get_declaration_with_comments( + &self, + symbol: &SymbolInformation, + text: &String, + guid_to_children: &HashMap>, + guid_to_info: &HashMap, + ) -> (String, (usize, usize)) { if let Some(children) = guid_to_children.get(&symbol.guid) { let mut res_line: Vec = Default::default(); let mut row = symbol.full_range.start_point.row; - let mut all_symbols = children.iter() + let mut all_symbols = children + .iter() .filter_map(|guid| guid_to_info.get(guid)) .collect::>(); - all_symbols.sort_by(|a, b| - a.full_range.start_byte.cmp(&b.full_range.start_byte) - ); + all_symbols.sort_by(|a, b| a.full_range.start_byte.cmp(&b.full_range.start_byte)); if symbol.symbol_type == SymbolType::FunctionDeclaration { - res_line = symbol.get_content(text).unwrap().split("\n").map(|x| x.to_string()).collect::>(); + res_line = symbol + .get_content(text) + .unwrap() + .split("\n") + .map(|x| x.to_string()) + .collect::>(); row = symbol.full_range.end_point.row; } else { - let mut content_lines = symbol.get_declaration_content(text).unwrap() + let mut content_lines = symbol + .get_declaration_content(text) + .unwrap() .split("\n") - .map(|x| x.to_string().replace("\t", " ")).collect::>(); + .map(|x| x.to_string().replace("\t", " ")) + .collect::>(); let mut intent_n = 0; if let Some(first) = content_lines.first_mut() { intent_n = first.len() - first.trim_start().len(); @@ -919,9 +1238,7 @@ impl SkeletonFormatter for PythonSkeletonFormatter { row = sym.full_range.end_point.row; let content = sym.get_content(text).unwrap(); let lines = content.split("\n").collect::>(); - let lines = lines.iter() - .map(|x| x.to_string()) - .collect::>(); + let lines = lines.iter().map(|x| x.to_string()).collect::>(); res_line.extend(lines); } if res_line.is_empty() { diff --git a/refact-agent/engine/src/ast/treesitter/parsers/rust.rs b/refact-agent/engine/src/ast/treesitter/parsers/rust.rs index 41dc0bfb0..bdb8a9585 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/rust.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/rust.rs @@ -7,21 +7,24 @@ use similar::DiffableStr; use tree_sitter::{Node, Parser, Point, Range}; use uuid::Uuid; -use crate::ast::treesitter::ast_instance_structs::{AstSymbolInstance, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeAlias, TypeDef, VariableDefinition, VariableUsage}; +use crate::ast::treesitter::ast_instance_structs::{ + AstSymbolInstance, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, + FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeAlias, + TypeDef, VariableDefinition, VariableUsage, +}; use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::{AstLanguageParser, internal_error, ParserError}; use crate::ast::treesitter::parsers::utils::{get_children_guids, get_guid}; - pub(crate) struct RustParser { pub parser: Parser, } static RUST_KEYWORDS: [&str; 37] = [ - "as", "async", "await", "break", "const", "continue", "crate", "dyn", "else", "enum", - "extern", "false", "fn", "for", "if", "impl", "in", "let", "loop", "match", "mod", "move", - "mut", "pub", "ref", "return", "self", "static", "struct", "super", "trait", "true", - "type", "unsafe", "use", "where", "while" + "as", "async", "await", "break", "const", "continue", "crate", "dyn", "else", "enum", "extern", + "false", "fn", "for", "if", "impl", "in", "let", "loop", "match", "mod", "move", "mut", "pub", + "ref", 
"return", "self", "static", "struct", "super", "trait", "true", "type", "unsafe", "use", + "where", "while", ]; impl RustParser { @@ -123,7 +126,14 @@ impl RustParser { None } - pub fn parse_function_declaration(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid, is_error: bool) -> Vec { + pub fn parse_function_declaration( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionDeclaration::default(); decl.ast_fields.language = LanguageId::Rust; @@ -172,11 +182,17 @@ impl RustParser { if let Some(type_parameters) = parent.child_by_field_name("type_parameters") { let mut templates = vec![]; for idx in 0..type_parameters.child_count() { - if let Some(t) = RustParser::parse_type(&type_parameters.child(idx).unwrap(), code) { + if let Some(t) = RustParser::parse_type(&type_parameters.child(idx).unwrap(), code) + { templates.push(t); } } - symbols.extend(self.find_error_usages(&type_parameters, code, path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &type_parameters, + code, + path, + &decl.ast_fields.guid, + )); decl.template_types = templates; } decl.args = function_args; @@ -188,7 +204,13 @@ impl RustParser { start_point: decl.ast_fields.full_range.start_point, end_point: decl_end_point, }; - symbols.extend(self.parse_block(&body_node, code, path, &decl.ast_fields.guid, is_error)); + symbols.extend(self.parse_block( + &body_node, + code, + path, + &decl.ast_fields.guid, + is_error, + )); } else { decl.ast_fields.declaration_range = decl.ast_fields.full_range.clone(); } @@ -197,7 +219,14 @@ impl RustParser { symbols } - pub fn parse_struct_declaration(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid, is_error: bool) -> Vec { + pub fn parse_struct_declaration( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = StructDeclaration::default(); @@ -224,7 +253,12 @@ impl RustParser { if let Some(type_node) = parent.child_by_field_name("type") { symbols.extend(self.find_error_usages(&type_node, code, path, &decl.ast_fields.guid)); if let Some(trait_node) = parent.child_by_field_name("trait") { - symbols.extend(self.find_error_usages(&trait_node, code, path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &trait_node, + code, + path, + &decl.ast_fields.guid, + )); if let Some(trait_name) = RustParser::parse_type(&trait_node, code) { decl.template_types.push(trait_name); } @@ -250,21 +284,30 @@ impl RustParser { if let Some(body_node) = parent.child_by_field_name("body") { match body_node.kind() { "field_declaration_list" => { - symbols.extend(self.find_error_usages(&body_node, code, path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &body_node, + code, + path, + &decl.ast_fields.guid, + )); for idx in 0..body_node.child_count() { let field_declaration_node = body_node.child(idx).unwrap(); match field_declaration_node.kind() { "field_declaration" => { - let _text = code.slice(field_declaration_node.byte_range()).to_string(); - let name_node = field_declaration_node.child_by_field_name("name").unwrap(); - let type_node = field_declaration_node.child_by_field_name("type").unwrap(); + let _text = + code.slice(field_declaration_node.byte_range()).to_string(); + let name_node = + field_declaration_node.child_by_field_name("name").unwrap(); + let 
type_node = + field_declaration_node.child_by_field_name("type").unwrap(); let mut decl_ = ClassFieldDeclaration::default(); decl_.ast_fields.full_range = field_declaration_node.range(); decl_.ast_fields.declaration_range = field_declaration_node.range(); decl_.ast_fields.file_path = path.clone(); decl_.ast_fields.parent_guid = Some(decl.ast_fields.guid.clone()); decl_.ast_fields.guid = get_guid(); - decl_.ast_fields.name = code.slice(name_node.byte_range()).to_string(); + decl_.ast_fields.name = + code.slice(name_node.byte_range()).to_string(); decl_.ast_fields.language = LanguageId::Rust; if let Some(type_) = RustParser::parse_type(&type_node, code) { decl_.type_ = type_; @@ -276,7 +319,13 @@ impl RustParser { } } "declaration_list" => { - symbols.extend(self.parse_block(&body_node, code, path, &decl.ast_fields.guid, is_error)); + symbols.extend(self.parse_block( + &body_node, + code, + path, + &decl.ast_fields.guid, + is_error, + )); } &_ => {} } @@ -287,7 +336,14 @@ impl RustParser { symbols } - pub fn parse_call_expression(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid, is_error: bool) -> Vec { + pub fn parse_call_expression( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionCall::default(); decl.ast_fields.language = LanguageId::Rust; @@ -308,7 +364,8 @@ impl RustParser { let field = function_node.child_by_field_name("field").unwrap(); decl.ast_fields.name = code.slice(field.byte_range()).to_string(); let value_node = function_node.child_by_field_name("value").unwrap(); - let usages = self.parse_usages(&value_node, code, path, parent_guid, is_error); + let usages = + self.parse_usages(&value_node, code, path, parent_guid, is_error); if !usages.is_empty() { if let Some(last) = usages.last() { // dirty hack: last element is first element in the tree @@ -320,7 +377,12 @@ impl RustParser { "scoped_identifier" => { let namespace = { if let Some(namespace) = parent.child_by_field_name("path") { - symbols.extend(self.find_error_usages(&namespace, code, path, &parent_guid)); + symbols.extend(self.find_error_usages( + &namespace, + code, + path, + &parent_guid, + )); code.slice(namespace.byte_range()).to_string() } else { "".to_string() @@ -349,7 +411,8 @@ impl RustParser { symbols.extend(self.find_error_usages(&arguments_node, code, path, &parent_guid)); for idx in 0..arguments_node.child_count() { let arg_node = arguments_node.child(idx).unwrap(); - let arg_type = self.parse_usages(&arg_node, code, path, &decl.ast_fields.guid, is_error); + let arg_type = + self.parse_usages(&arg_node, code, path, &decl.ast_fields.guid, is_error); symbols.extend(arg_type); } } @@ -358,7 +421,14 @@ impl RustParser { symbols } - pub fn parse_variable_definition(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid, is_error: bool) -> Vec { + pub fn parse_variable_definition( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + ) -> Vec { fn parse_type_in_value(parent: &Node, code: &str) -> TypeDef { let mut dtype = TypeDef::default(); let kind = parent.kind(); @@ -401,12 +471,8 @@ impl RustParser { } let pattern_node = match parent.kind() { - "const_item" | "static_item" => { - parent.child_by_field_name("name").unwrap() - } - _ => { - parent.child_by_field_name("pattern").unwrap() - } + "const_item" | "static_item" => parent.child_by_field_name("name").unwrap(), + _ => 
parent.child_by_field_name("pattern").unwrap(), }; let kind = pattern_node.kind(); @@ -449,7 +515,14 @@ impl RustParser { symbols } - pub fn parse_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid, is_error: bool) -> Vec { + pub fn parse_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + ) -> Vec { let mut symbols: Vec = vec![]; let kind = parent.kind(); let _text = code.slice(parent.byte_range()).to_string(); @@ -481,10 +554,22 @@ impl RustParser { symbols.extend(self.parse_usages(&right, code, path, parent_guid, is_error)); } "call_expression" => { - symbols.extend(self.parse_call_expression(&parent, code, path, parent_guid, is_error)); + symbols.extend(self.parse_call_expression( + &parent, + code, + path, + parent_guid, + is_error, + )); } "let_condition" => { - symbols.extend(self.parse_variable_definition(&parent, code, path, parent_guid, is_error)); + symbols.extend(self.parse_variable_definition( + &parent, + code, + path, + parent_guid, + is_error, + )); } "field_expression" => { let field_node = parent.child_by_field_name("field").unwrap(); @@ -539,20 +624,45 @@ impl RustParser { "tuple_expression" => { for idx in 0..parent.child_count() { let tuple_child_node = parent.child(idx).unwrap(); - symbols.extend(self.parse_usages(&tuple_child_node, code, path, parent_guid, is_error)); + symbols.extend(self.parse_usages( + &tuple_child_node, + code, + path, + parent_guid, + is_error, + )); } } "struct_expression" => { - symbols.extend(self.parse_call_expression(&parent, code, path, parent_guid, is_error)); + symbols.extend(self.parse_call_expression( + &parent, + code, + path, + parent_guid, + is_error, + )); } "if_expression" => { let condition_node = parent.child_by_field_name("condition").unwrap(); - symbols.extend(self.parse_usages(&condition_node, code, path, parent_guid, is_error)); + symbols.extend(self.parse_usages( + &condition_node, + code, + path, + parent_guid, + is_error, + )); let consequence_node = parent.child_by_field_name("consequence").unwrap(); - symbols.extend(self.parse_expression_statement(&consequence_node, code, path, parent_guid, is_error)); + symbols.extend(self.parse_expression_statement( + &consequence_node, + code, + path, + parent_guid, + is_error, + )); if let Some(alternative_node) = parent.child_by_field_name("alternative") { let child = alternative_node.child(1).unwrap(); - let v = self.parse_expression_statement(&child, code, path, parent_guid, is_error); + let v = + self.parse_expression_statement(&child, code, path, parent_guid, is_error); symbols.extend(v); } } @@ -567,7 +677,8 @@ impl RustParser { } "match_arm" => { let pattern_node = parent.child_by_field_name("pattern").unwrap(); - let mut symbols = self.parse_usages(&pattern_node, code, path, parent_guid, is_error); + let mut symbols = + self.parse_usages(&pattern_node, code, path, parent_guid, is_error); let value_node = parent.child_by_field_name("value").unwrap(); symbols.extend(self.parse_usages(&value_node, code, path, parent_guid, is_error)); } @@ -578,20 +689,45 @@ impl RustParser { } } "for_expression" => { - let symbols_ = self.parse_variable_definition(&parent, code, path, parent_guid, is_error); + let symbols_ = + self.parse_variable_definition(&parent, code, path, parent_guid, is_error); symbols.extend(symbols_); let body_node = parent.child_by_field_name("body").unwrap(); - symbols.extend(self.parse_expression_statement(&body_node, code, path, parent_guid, is_error)); + 
symbols.extend(self.parse_expression_statement( + &body_node, + code, + path, + parent_guid, + is_error, + )); } "while_expression" => { let condition_node = parent.child_by_field_name("condition").unwrap(); - symbols.extend(self.parse_usages(&condition_node, code, path, parent_guid, is_error)); + symbols.extend(self.parse_usages( + &condition_node, + code, + path, + parent_guid, + is_error, + )); let body_node = parent.child_by_field_name("body").unwrap(); - symbols.extend(self.parse_expression_statement(&body_node, code, path, parent_guid, is_error)); + symbols.extend(self.parse_expression_statement( + &body_node, + code, + path, + parent_guid, + is_error, + )); } "loop_expression" => { let body_node = parent.child_by_field_name("body").unwrap(); - symbols.extend(self.parse_expression_statement(&body_node, code, path, parent_guid, is_error)); + symbols.extend(self.parse_expression_statement( + &body_node, + code, + path, + parent_guid, + is_error, + )); } "ERROR" => { symbols.extend(self.parse_error_usages(&parent, code, path, parent_guid)); @@ -601,7 +737,13 @@ impl RustParser { symbols } - fn find_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn find_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); for i in 0..parent.child_count() { let child = parent.child(i).unwrap(); @@ -612,7 +754,13 @@ impl RustParser { symbols } - fn parse_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn parse_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); match parent.kind() { "field_expression" => { @@ -687,7 +835,14 @@ impl RustParser { symbols } - pub fn parse_expression_statement(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid, is_error: bool) -> Vec { + pub fn parse_expression_statement( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + ) -> Vec { let mut symbols = vec![]; let kind = parent.kind(); let _text = code.slice(parent.byte_range()).to_string(); @@ -717,7 +872,14 @@ impl RustParser { symbols } - fn parse_use_declaration(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid, is_error: bool) -> Vec { + fn parse_use_declaration( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + ) -> Vec { let mut symbols: Vec = vec![]; let argument_node = parent.child_by_field_name("argument").unwrap(); match argument_node.kind() { @@ -731,7 +893,8 @@ impl RustParser { def.ast_fields.file_path = path.clone(); def.ast_fields.parent_guid = Some(parent_guid.clone()); def.ast_fields.guid = get_guid(); - def.path_components = code.slice(argument_node.byte_range()) + def.path_components = code + .slice(argument_node.byte_range()) .split("::") .map(|s| s.to_string()) .collect(); @@ -769,7 +932,8 @@ impl RustParser { def.ast_fields.file_path = path.clone(); def.ast_fields.parent_guid = Some(parent_guid.clone()); def.ast_fields.guid = get_guid(); - def.path_components = code.slice(argument_node.byte_range()) + def.path_components = code + .slice(argument_node.byte_range()) .split("::") .map(|s| s.to_string()) .collect(); @@ -785,7 +949,8 @@ impl RustParser { "scoped_use_list" => { let base_path = { if let Some(path) = argument_node.child_by_field_name("path") { - 
code.slice(path.byte_range()).split("::") + code.slice(path.byte_range()) + .split("::") .map(|s| s.to_string()) .collect() } else { @@ -795,7 +960,9 @@ impl RustParser { if let Some(list_node) = argument_node.child_by_field_name("list") { for i in 0..list_node.child_count() { let child = list_node.child(i).unwrap(); - if !["use_as_clause", "identifier", "scoped_identifier"].contains(&child.kind()) { + if !["use_as_clause", "identifier", "scoped_identifier"] + .contains(&child.kind()) + { continue; } let mut def = ImportDeclaration::default(); @@ -808,17 +975,28 @@ impl RustParser { match child.kind() { "use_as_clause" => { if let Some(path) = child.child_by_field_name("path") { - def.path_components.extend(code.slice(path.byte_range()).split("::").map(|s| s.to_string()).collect::>()); + def.path_components.extend( + code.slice(path.byte_range()) + .split("::") + .map(|s| s.to_string()) + .collect::>(), + ); } if let Some(alias) = child.child_by_field_name("alias") { def.alias = Some(code.slice(alias.byte_range()).to_string()); } } "identifier" => { - def.path_components.push(code.slice(child.byte_range()).to_string()); + def.path_components + .push(code.slice(child.byte_range()).to_string()); } "scoped_identifier" => { - def.path_components.extend(code.slice(child.byte_range()).split("::").map(|s| s.to_string()).collect::>()); + def.path_components.extend( + code.slice(child.byte_range()) + .split("::") + .map(|s| s.to_string()) + .collect::>(), + ); } _ => {} } @@ -839,7 +1017,8 @@ impl RustParser { match child.kind() { "use_as_clause" => { let alias_node = child.child_by_field_name("alias").unwrap(); - let alias: Option = Some(code.slice(alias_node.byte_range()).to_string()); + let alias: Option = + Some(code.slice(alias_node.byte_range()).to_string()); if let Some(path_node) = child.child_by_field_name("path") { match path_node.kind() { "scoped_identifier" => { @@ -849,7 +1028,11 @@ impl RustParser { def.ast_fields.file_path = path.clone(); def.ast_fields.parent_guid = Some(parent_guid.clone()); def.ast_fields.guid = get_guid(); - def.path_components = code.slice(path_node.byte_range()).split("::").map(|s| s.to_string()).collect(); + def.path_components = code + .slice(path_node.byte_range()) + .split("::") + .map(|s| s.to_string()) + .collect(); if let Some(first) = def.path_components.first() { if first == "std" { def.import_type = ImportType::System; @@ -862,15 +1045,19 @@ impl RustParser { } _ => { let mut type_alias = TypeAlias::default(); - type_alias.ast_fields.name = code.slice(alias_node.byte_range()).to_string(); + type_alias.ast_fields.name = + code.slice(alias_node.byte_range()).to_string(); type_alias.ast_fields.language = LanguageId::Rust; type_alias.ast_fields.full_range = parent.range(); type_alias.ast_fields.file_path = path.clone(); - type_alias.ast_fields.parent_guid = Some(parent_guid.clone()); + type_alias.ast_fields.parent_guid = + Some(parent_guid.clone()); type_alias.ast_fields.guid = get_guid(); type_alias.ast_fields.is_error = is_error; - if let Some(dtype) = RustParser::parse_type(&path_node, code) { + if let Some(dtype) = + RustParser::parse_type(&path_node, code) + { type_alias.types.push(dtype); } symbols.push(Arc::new(RwLock::new(Box::new(type_alias)))); @@ -896,7 +1083,11 @@ impl RustParser { def.ast_fields.file_path = path.clone(); def.ast_fields.parent_guid = Some(parent_guid.clone()); def.ast_fields.guid = get_guid(); - def.path_components = code.slice(child.byte_range()).split("::").map(|s| s.to_string()).collect(); + def.path_components = code + 
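The `use`-declaration handling reformatted here splits a path on `::` and marks the import as a system import when the first segment is `std`. A dependency-free sketch of that classification, with illustrative names (`ImportKind`, `UseImport`) rather than the engine's `ImportDeclaration` and `ImportType`:

```rust
// Classify a Rust `use` path: split on "::", treat paths rooted at `std`
// as system imports, everything else as user modules.
#[derive(Debug, PartialEq)]
enum ImportKind {
    System,
    UserModule,
}

#[derive(Debug)]
struct UseImport {
    path_components: Vec<String>,
    alias: Option<String>,
    kind: ImportKind,
}

fn parse_use_path(path: &str, alias: Option<&str>) -> UseImport {
    let path_components: Vec<String> = path.split("::").map(|s| s.to_string()).collect();
    let kind = match path_components.first().map(String::as_str) {
        Some("std") => ImportKind::System,
        _ => ImportKind::UserModule,
    };
    UseImport {
        path_components,
        alias: alias.map(|a| a.to_string()),
        kind,
    }
}

fn main() {
    let imp = parse_use_path("std::collections::HashMap", None);
    assert_eq!(imp.kind, ImportKind::System);
    let imp = parse_use_path("crate::ast::treesitter::parsers", Some("parsers"));
    assert_eq!(imp.kind, ImportKind::UserModule);
    println!("{:?}", imp);
}
```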
.slice(child.byte_range()) + .split("::") + .map(|s| s.to_string()) + .collect(); if let Some(first) = def.path_components.first() { if first == "std" { def.import_type = ImportType::System; @@ -926,7 +1117,14 @@ impl RustParser { symbols } - pub fn parse_block(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid, is_error: bool) -> Vec { + pub fn parse_block( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + is_error: bool, + ) -> Vec { let mut symbols: Vec = vec![]; for i in 0..parent.child_count() { let child = parent.child(i).unwrap(); @@ -934,7 +1132,13 @@ impl RustParser { let _text = code.slice(child.byte_range()).to_string(); match kind { "use_declaration" => { - symbols.extend(self.parse_use_declaration(&child, code, path, parent_guid, is_error)); + symbols.extend(self.parse_use_declaration( + &child, + code, + path, + parent_guid, + is_error, + )); } "type_item" => { let name_node = child.child_by_field_name("name").unwrap(); @@ -958,12 +1162,14 @@ impl RustParser { symbols.extend(v); } "let_declaration" | "const_item" | "static_item" => { - let symbols_ = self.parse_variable_definition(&child, code, path, parent_guid, is_error); + let symbols_ = + self.parse_variable_definition(&child, code, path, parent_guid, is_error); symbols.extend(symbols_); } "expression_statement" => { let child = child.child(0).unwrap(); - let v = self.parse_expression_statement(&child, code, path, parent_guid, is_error); + let v = + self.parse_expression_statement(&child, code, path, parent_guid, is_error); symbols.extend(v); } // return without keyword @@ -972,14 +1178,27 @@ impl RustParser { } // return without keyword "call_expression" => { - let symbols_ = self.parse_call_expression(&child, code, path, parent_guid, is_error); + let symbols_ = + self.parse_call_expression(&child, code, path, parent_guid, is_error); symbols.extend(symbols_); } "enum_item" | "struct_item" | "trait_item" | "impl_item" | "union_item" => { - symbols.extend(self.parse_struct_declaration(&child, code, path, parent_guid, is_error)); + symbols.extend(self.parse_struct_declaration( + &child, + code, + path, + parent_guid, + is_error, + )); } "function_item" | "function_signature_item" => { - symbols.extend(self.parse_function_declaration(&child, code, path, parent_guid, is_error)); + symbols.extend(self.parse_function_declaration( + &child, + code, + path, + parent_guid, + is_error, + )); } "line_comment" | "block_comment" => { let mut def = CommentDefinition::default(); diff --git a/refact-agent/engine/src/ast/treesitter/parsers/tests.rs b/refact-agent/engine/src/ast/treesitter/parsers/tests.rs index 4b0b7483c..eb386e053 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/tests.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/tests.rs @@ -9,25 +9,32 @@ use similar::DiffableStr; use uuid::Uuid; use crate::ast::treesitter::file_ast_markup::FileASTMarkup; -use crate::ast::treesitter::ast_instance_structs::{AstSymbolInstance, AstSymbolInstanceArc, SymbolInformation}; +use crate::ast::treesitter::ast_instance_structs::{ + AstSymbolInstance, AstSymbolInstanceArc, SymbolInformation, +}; use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::AstLanguageParser; use crate::ast::treesitter::skeletonizer::make_formatter; use crate::ast::treesitter::structs::SymbolType; use crate::files_in_workspace::Document; -mod rust; -mod python; +mod cpp; mod java; +mod js; mod kotlin; -mod cpp; +mod python; +mod rust; mod ts; -mod js; pub(crate) 
fn print(symbols: &Vec, code: &str) { - let guid_to_symbol_map = symbols.iter() - .map(|s| (s.read().guid().clone(), s.clone())).collect::>(); - let sorted = symbols.iter().sorted_by_key(|x| x.read().full_range().start_byte).collect::>(); + let guid_to_symbol_map = symbols + .iter() + .map(|s| (s.read().guid().clone(), s.clone())) + .collect::>(); + let sorted = symbols + .iter() + .sorted_by_key(|x| x.read().full_range().start_byte) + .collect::>(); let mut used_guids: HashSet = Default::default(); for sym in sorted { @@ -45,9 +52,20 @@ pub(crate) fn print(symbols: &Vec, code: &str) { } let full_range = sym.read().full_range().clone(); let range = full_range.start_byte..full_range.end_byte; - println!("{0} {1} [{2}] {3}", guid.to_string().slice(0..6), name, code.slice(range).lines().collect::>().first().unwrap(), type_name); + println!( + "{0} {1} [{2}] {3}", + guid.to_string().slice(0..6), + name, + code.slice(range) + .lines() + .collect::>() + .first() + .unwrap(), + type_name + ); used_guids.insert(guid.clone()); - let mut candidates: VecDeque<(i32, Uuid)> = VecDeque::from_iter(sym.read().childs_guid().iter().map(|x| (4, x.clone()))); + let mut candidates: VecDeque<(i32, Uuid)> = + VecDeque::from_iter(sym.read().childs_guid().iter().map(|x| (4, x.clone()))); while let Some((offest, cand)) = candidates.pop_front() { used_guids.insert(cand.clone()); if let Some(sym_l) = guid_to_symbol_map.get(&cand) { @@ -61,9 +79,25 @@ pub(crate) fn print(symbols: &Vec, code: &str) { } let full_range = sym_l.read().full_range().clone(); let range = full_range.start_byte..full_range.end_byte; - println!("{0} {1} {2} [{3}] {4}", cand.to_string().slice(0..6), str::repeat(" ", offest as usize), - name, code.slice(range).lines().collect::>().first().unwrap(), type_name); - let mut new_candidates = VecDeque::from_iter(sym_l.read().childs_guid().iter().map(|x| (offest + 2, x.clone()))); + println!( + "{0} {1} {2} [{3}] {4}", + cand.to_string().slice(0..6), + str::repeat(" ", offest as usize), + name, + code.slice(range) + .lines() + .collect::>() + .first() + .unwrap(), + type_name + ); + let mut new_candidates = VecDeque::from_iter( + sym_l + .read() + .childs_guid() + .iter() + .map(|x| (offest + 2, x.clone())), + ); new_candidates.extend(candidates.clone()); candidates = new_candidates; } @@ -71,14 +105,16 @@ pub(crate) fn print(symbols: &Vec, code: &str) { } } -fn eq_symbols(symbol: &AstSymbolInstanceArc, - ref_symbol: &Box) -> bool { +fn eq_symbols(symbol: &AstSymbolInstanceArc, ref_symbol: &Box) -> bool { let symbol = symbol.read(); let _f = symbol.fields(); let _ref_f = ref_symbol.fields(); let sym_type = symbol.symbol_type() == ref_symbol.symbol_type(); - let name = if ref_symbol.name().contains(ref_symbol.guid().to_string().as_str()) { + let name = if ref_symbol + .name() + .contains(ref_symbol.guid().to_string().as_str()) + { symbol.name().contains(symbol.guid().to_string().as_str()) } else { symbol.name() == ref_symbol.name() @@ -95,14 +131,31 @@ fn eq_symbols(symbol: &AstSymbolInstanceArc, let definition_range = symbol.definition_range() == ref_symbol.definition_range(); let is_error = symbol.is_error() == ref_symbol.is_error(); - sym_type && name && lang && file_path && is_type && is_declaration && - namespace && full_range && declaration_range && definition_range && is_error + sym_type + && name + && lang + && file_path + && is_type + && is_declaration + && namespace + && full_range + && declaration_range + && definition_range + && is_error } -fn compare_symbols(symbols: &Vec, - ref_symbols: 
&Vec>) { - let guid_to_sym = symbols.iter().map(|s| (s.clone().read().guid().clone(), s.clone())).collect::>(); - let ref_guid_to_sym = ref_symbols.iter().map(|s| (s.guid().clone(), s)).collect::>(); +fn compare_symbols( + symbols: &Vec, + ref_symbols: &Vec>, +) { + let guid_to_sym = symbols + .iter() + .map(|s| (s.clone().read().guid().clone(), s.clone())) + .collect::>(); + let ref_guid_to_sym = ref_symbols + .iter() + .map(|s| (s.guid().clone(), s)) + .collect::>(); let mut checked_guids: HashSet = Default::default(); for sym in symbols { let sym_l = sym.read(); @@ -111,12 +164,15 @@ fn compare_symbols(symbols: &Vec, if checked_guids.contains(&sym_l.guid()) { continue; } - let closest_sym = ref_symbols.iter().filter(|s| sym_l.full_range() == s.full_range()) + let closest_sym = ref_symbols + .iter() + .filter(|s| sym_l.full_range() == s.full_range()) .filter(|x| eq_symbols(&sym, x)) .collect::>(); assert_eq!(closest_sym.len(), 1); let closest_sym = closest_sym.first().unwrap(); - let mut candidates: Vec<(AstSymbolInstanceArc, &Box)> = vec![(sym.clone(), &closest_sym)]; + let mut candidates: Vec<(AstSymbolInstanceArc, &Box)> = + vec![(sym.clone(), &closest_sym)]; while let Some((sym, ref_sym)) = candidates.pop() { let sym_l = sym.read(); if checked_guids.contains(&sym_l.guid()) { @@ -134,33 +190,46 @@ fn compare_symbols(symbols: &Vec, ); if sym_l.parent_guid().is_some() { if let Some(parent) = guid_to_sym.get(&sym_l.parent_guid().unwrap()) { - let ref_parent = ref_guid_to_sym.get(&ref_sym.parent_guid().unwrap()).unwrap(); + let ref_parent = ref_guid_to_sym + .get(&ref_sym.parent_guid().unwrap()) + .unwrap(); candidates.push((parent.clone(), ref_parent)); } } assert_eq!(sym_l.childs_guid().len(), ref_sym.childs_guid().len()); - let childs = sym_l.childs_guid().iter().filter_map(|x| guid_to_sym.get(x)) + let childs = sym_l + .childs_guid() + .iter() + .filter_map(|x| guid_to_sym.get(x)) .collect::>(); - let ref_childs = ref_sym.childs_guid().iter().filter_map(|x| ref_guid_to_sym.get(x)) + let ref_childs = ref_sym + .childs_guid() + .iter() + .filter_map(|x| ref_guid_to_sym.get(x)) .collect::>(); for child in childs { let child_l = child.read(); - let closest_sym = ref_childs.iter().filter(|s| child_l.full_range() == s.full_range()) + let closest_sym = ref_childs + .iter() + .filter(|s| child_l.full_range() == s.full_range()) .collect::>(); assert_eq!(closest_sym.len(), 1); let closest_sym = closest_sym.first().unwrap(); candidates.push((child.clone(), closest_sym)); } - assert!((sym_l.get_caller_guid().is_some() && ref_sym.get_caller_guid().is_some()) - || (sym_l.get_caller_guid().is_none() && ref_sym.get_caller_guid().is_none()) + assert!( + (sym_l.get_caller_guid().is_some() && ref_sym.get_caller_guid().is_some()) + || (sym_l.get_caller_guid().is_none() && ref_sym.get_caller_guid().is_none()) ); if sym_l.get_caller_guid().is_some() { if let Some(caller) = guid_to_sym.get(&sym_l.get_caller_guid().unwrap()) { - let ref_caller = ref_guid_to_sym.get(&ref_sym.get_caller_guid().unwrap()).unwrap(); + let ref_caller = ref_guid_to_sym + .get(&ref_sym.get_caller_guid().unwrap()) + .unwrap(); candidates.push((caller.clone(), ref_caller)); } } @@ -188,9 +257,12 @@ fn check_duplicates_with_ref(symbols: &Vec>) { } } -pub(crate) fn base_parser_test(parser: &mut Box, - path: &PathBuf, - code: &str, symbols_str: &str) { +pub(crate) fn base_parser_test( + parser: &mut Box, + path: &PathBuf, + code: &str, + symbols_str: &str, +) { // Normalize line endings to LF to ensure consistent byte offsets across 
platforms let normalized_code = code.replace("\r\n", "\n"); let symbols = parser.parse(&normalized_code, &path); @@ -211,27 +283,48 @@ struct Skeleton { pub line: String, } -pub(crate) fn base_skeletonizer_test(lang: &LanguageId, - parser: &mut Box, - file: &PathBuf, - code: &str, skeleton_ref_str: &str) { +pub(crate) fn base_skeletonizer_test( + lang: &LanguageId, + parser: &mut Box, + file: &PathBuf, + code: &str, + skeleton_ref_str: &str, +) { // Normalize line endings to LF to ensure consistent byte offsets across platforms let normalized_code = code.replace("\r\n", "\n"); let symbols = parser.parse(&normalized_code, &file); - let symbols_struct = symbols.iter().map(|s| s.read().symbol_info_struct()).collect(); + let symbols_struct = symbols + .iter() + .map(|s| s.read().symbol_info_struct()) + .collect(); let doc = Document { doc_path: file.clone(), doc_text: Some(Rope::from_str(&normalized_code)), }; - let guid_to_children: HashMap> = symbols.iter().map(|s| (s.read().guid().clone(), s.read().childs_guid().clone())).collect(); - let ast_markup: FileASTMarkup = crate::ast::lowlevel_file_markup(&doc, &symbols_struct).unwrap(); - let guid_to_info: HashMap = ast_markup.symbols_sorted_by_path_len.iter().map(|s| (s.guid.clone(), s)).collect(); + let guid_to_children: HashMap> = symbols + .iter() + .map(|s| (s.read().guid().clone(), s.read().childs_guid().clone())) + .collect(); + let ast_markup: FileASTMarkup = + crate::ast::lowlevel_file_markup(&doc, &symbols_struct).unwrap(); + let guid_to_info: HashMap = ast_markup + .symbols_sorted_by_path_len + .iter() + .map(|s| (s.guid.clone(), s)) + .collect(); let formatter = make_formatter(lang); - let class_symbols: Vec<_> = ast_markup.symbols_sorted_by_path_len.iter().filter(|x| x.symbol_type == SymbolType::StructDeclaration).collect(); + let class_symbols: Vec<_> = ast_markup + .symbols_sorted_by_path_len + .iter() + .filter(|x| x.symbol_type == SymbolType::StructDeclaration) + .collect(); let mut skeletons: HashSet = Default::default(); for symbol in class_symbols { - let skeleton_line = formatter.make_skeleton(&symbol, &normalized_code, &guid_to_children, &guid_to_info); - skeletons.insert(Skeleton { line: skeleton_line }); + let skeleton_line = + formatter.make_skeleton(&symbol, &normalized_code, &guid_to_children, &guid_to_info); + skeletons.insert(Skeleton { + line: skeleton_line, + }); } // use std::fs; // let symbols_str_ = serde_json::to_string_pretty(&skeletons).unwrap(); @@ -241,7 +334,6 @@ pub(crate) fn base_skeletonizer_test(lang: &LanguageId, assert_eq!(skeletons, ref_skeletons); } - #[derive(Default, Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] struct Decl { pub top_row: usize, @@ -249,29 +341,53 @@ struct Decl { pub line: String, } -pub(crate) fn base_declaration_formatter_test(lang: &LanguageId, - parser: &mut Box, - file: &PathBuf, - code: &str, decls_ref_str: &str) { +pub(crate) fn base_declaration_formatter_test( + lang: &LanguageId, + parser: &mut Box, + file: &PathBuf, + code: &str, + decls_ref_str: &str, +) { // Normalize line endings to LF to ensure consistent byte offsets across platforms let normalized_code = code.replace("\r\n", "\n"); let symbols = parser.parse(&normalized_code, &file); - let symbols_struct = symbols.iter().map(|s| s.read().symbol_info_struct()).collect(); + let symbols_struct = symbols + .iter() + .map(|s| s.read().symbol_info_struct()) + .collect(); let doc = Document { doc_path: file.clone(), doc_text: Some(Rope::from_str(&normalized_code)), }; - let guid_to_children: HashMap> = 
symbols.iter().map(|s| (s.read().guid().clone(), s.read().childs_guid().clone())).collect(); - let ast_markup: FileASTMarkup = crate::ast::lowlevel_file_markup(&doc, &symbols_struct).unwrap(); - let guid_to_info: HashMap = ast_markup.symbols_sorted_by_path_len.iter().map(|s| (s.guid.clone(), s)).collect(); + let guid_to_children: HashMap> = symbols + .iter() + .map(|s| (s.read().guid().clone(), s.read().childs_guid().clone())) + .collect(); + let ast_markup: FileASTMarkup = + crate::ast::lowlevel_file_markup(&doc, &symbols_struct).unwrap(); + let guid_to_info: HashMap = ast_markup + .symbols_sorted_by_path_len + .iter() + .map(|s| (s.guid.clone(), s)) + .collect(); let formatter = make_formatter(lang); let mut decls: HashSet = Default::default(); for symbol in &guid_to_info { let symbol = guid_to_info.get(&symbol.0).unwrap(); - if !vec![SymbolType::StructDeclaration, SymbolType::FunctionDeclaration].contains(&symbol.symbol_type) { + if !vec![ + SymbolType::StructDeclaration, + SymbolType::FunctionDeclaration, + ] + .contains(&symbol.symbol_type) + { continue; } - let (line, (top_row, bottom_row)) = formatter.get_declaration_with_comments(&symbol, &normalized_code, &guid_to_children, &guid_to_info); + let (line, (top_row, bottom_row)) = formatter.get_declaration_with_comments( + &symbol, + &normalized_code, + &guid_to_children, + &guid_to_info, + ); if !line.is_empty() { decls.insert(Decl { top_row, diff --git a/refact-agent/engine/src/ast/treesitter/parsers/tests/cpp.rs b/refact-agent/engine/src/ast/treesitter/parsers/tests/cpp.rs index 282d93435..74e5daaef 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/tests/cpp.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/tests/cpp.rs @@ -6,7 +6,9 @@ mod tests { use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::AstLanguageParser; use crate::ast::treesitter::parsers::cpp::CppParser; - use crate::ast::treesitter::parsers::tests::{base_declaration_formatter_test, base_parser_test, base_skeletonizer_test}; + use crate::ast::treesitter::parsers::tests::{ + base_declaration_formatter_test, base_parser_test, base_skeletonizer_test, + }; const MAIN_CPP_CODE: &str = include_str!("cases/cpp/main.cpp"); const MAIN_CPP_SYMBOLS: &str = include_str!("cases/cpp/main.cpp.json"); @@ -17,25 +19,48 @@ mod tests { #[test] fn parser_test() { - let mut parser: Box = Box::new(CppParser::new().expect("CppParser::new")); + let mut parser: Box = + Box::new(CppParser::new().expect("CppParser::new")); let path = PathBuf::from("/main.cpp"); base_parser_test(&mut parser, &path, MAIN_CPP_CODE, MAIN_CPP_SYMBOLS); } #[test] fn skeletonizer_test() { - let mut parser: Box = Box::new(CppParser::new().expect("CppParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/cpp/circle.cpp"); + let mut parser: Box = + Box::new(CppParser::new().expect("CppParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/cpp/circle.cpp"); assert!(file.exists()); - base_skeletonizer_test(&LanguageId::Cpp, &mut parser, &file, CIRCLE_CPP_CODE, CIRCLE_CPP_SKELETON); + base_skeletonizer_test( + &LanguageId::Cpp, + &mut parser, + &file, + CIRCLE_CPP_CODE, + CIRCLE_CPP_SKELETON, + ); } #[test] fn declaration_formatter_test() { - let mut parser: Box = Box::new(CppParser::new().expect("CppParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/cpp/circle.cpp"); + let mut parser: Box = + 
Box::new(CppParser::new().expect("CppParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/cpp/circle.cpp"); assert!(file.exists()); - base_declaration_formatter_test(&LanguageId::Cpp, &mut parser, &file, CIRCLE_CPP_CODE, CIRCLE_CPP_DECLS); + base_declaration_formatter_test( + &LanguageId::Cpp, + &mut parser, + &file, + CIRCLE_CPP_CODE, + CIRCLE_CPP_DECLS, + ); } -} \ No newline at end of file +} diff --git a/refact-agent/engine/src/ast/treesitter/parsers/tests/java.rs b/refact-agent/engine/src/ast/treesitter/parsers/tests/java.rs index 31eaa963d..0f5fd3cba 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/tests/java.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/tests/java.rs @@ -6,7 +6,9 @@ mod tests { use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::AstLanguageParser; use crate::ast::treesitter::parsers::java::JavaParser; - use crate::ast::treesitter::parsers::tests::{base_declaration_formatter_test, base_parser_test, base_skeletonizer_test}; + use crate::ast::treesitter::parsers::tests::{ + base_declaration_formatter_test, base_parser_test, base_skeletonizer_test, + }; const MAIN_JAVA_CODE: &str = include_str!("cases/java/main.java"); const MAIN_JAVA_SYMBOLS: &str = include_str!("cases/java/main.java.json"); @@ -17,25 +19,48 @@ mod tests { #[test] fn parser_test() { - let mut parser: Box = Box::new(JavaParser::new().expect("JavaParser::new")); + let mut parser: Box = + Box::new(JavaParser::new().expect("JavaParser::new")); let path = PathBuf::from("file:///main.java"); base_parser_test(&mut parser, &path, MAIN_JAVA_CODE, MAIN_JAVA_SYMBOLS); } #[test] fn skeletonizer_test() { - let mut parser: Box = Box::new(JavaParser::new().expect("JavaParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/java/person.java"); + let mut parser: Box = + Box::new(JavaParser::new().expect("JavaParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/java/person.java"); assert!(file.exists()); - base_skeletonizer_test(&LanguageId::Java, &mut parser, &file, PERSON_JAVA_CODE, PERSON_JAVA_SKELETON); + base_skeletonizer_test( + &LanguageId::Java, + &mut parser, + &file, + PERSON_JAVA_CODE, + PERSON_JAVA_SKELETON, + ); } #[test] fn declaration_formatter_test() { - let mut parser: Box = Box::new(JavaParser::new().expect("JavaParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/java/person.java"); + let mut parser: Box = + Box::new(JavaParser::new().expect("JavaParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/java/person.java"); assert!(file.exists()); - base_declaration_formatter_test(&LanguageId::Java, &mut parser, &file, PERSON_JAVA_CODE, PERSON_JAVA_DECLS); + base_declaration_formatter_test( + &LanguageId::Java, + &mut parser, + &file, + PERSON_JAVA_CODE, + PERSON_JAVA_DECLS, + ); } } diff --git a/refact-agent/engine/src/ast/treesitter/parsers/tests/js.rs b/refact-agent/engine/src/ast/treesitter/parsers/tests/js.rs index a8d829388..7d80a4b7a 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/tests/js.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/tests/js.rs @@ -6,7 +6,9 @@ mod tests { use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::AstLanguageParser; use crate::ast::treesitter::parsers::js::JSParser; - 
use crate::ast::treesitter::parsers::tests::{base_declaration_formatter_test, base_parser_test, base_skeletonizer_test}; + use crate::ast::treesitter::parsers::tests::{ + base_declaration_formatter_test, base_parser_test, base_skeletonizer_test, + }; const MAIN_JS_CODE: &str = include_str!("cases/js/main.js"); const MAIN_JS_SYMBOLS: &str = include_str!("cases/js/main.js.json"); @@ -17,25 +19,48 @@ mod tests { #[test] fn parser_test() { - let mut parser: Box = Box::new(JSParser::new().expect("JSParser::new")); + let mut parser: Box = + Box::new(JSParser::new().expect("JSParser::new")); let path = PathBuf::from("file:///main.js"); base_parser_test(&mut parser, &path, MAIN_JS_CODE, MAIN_JS_SYMBOLS); } #[test] fn skeletonizer_test() { - let mut parser: Box = Box::new(JSParser::new().expect("JSParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/js/car.js"); + let mut parser: Box = + Box::new(JSParser::new().expect("JSParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/js/car.js"); assert!(file.exists()); - base_skeletonizer_test(&LanguageId::JavaScript, &mut parser, &file, CAR_JS_CODE, CAR_JS_SKELETON); + base_skeletonizer_test( + &LanguageId::JavaScript, + &mut parser, + &file, + CAR_JS_CODE, + CAR_JS_SKELETON, + ); } #[test] fn declaration_formatter_test() { - let mut parser: Box = Box::new(JSParser::new().expect("JSParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/js/car.js"); + let mut parser: Box = + Box::new(JSParser::new().expect("JSParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/js/car.js"); assert!(file.exists()); - base_declaration_formatter_test(&LanguageId::JavaScript, &mut parser, &file, CAR_JS_CODE, CAR_JS_DECLS); + base_declaration_formatter_test( + &LanguageId::JavaScript, + &mut parser, + &file, + CAR_JS_CODE, + CAR_JS_DECLS, + ); } } diff --git a/refact-agent/engine/src/ast/treesitter/parsers/tests/kotlin.rs b/refact-agent/engine/src/ast/treesitter/parsers/tests/kotlin.rs index 22eca8d6a..13f36d063 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/tests/kotlin.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/tests/kotlin.rs @@ -2,12 +2,15 @@ use std::path::PathBuf; use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::kotlin::KotlinParser; -use crate::ast::treesitter::parsers::tests::{base_parser_test, base_skeletonizer_test, base_declaration_formatter_test}; +use crate::ast::treesitter::parsers::tests::{ + base_parser_test, base_skeletonizer_test, base_declaration_formatter_test, +}; #[test] fn test_kotlin_main() { let parser = KotlinParser::new().unwrap(); - let mut boxed_parser: Box = Box::new(parser); + let mut boxed_parser: Box = + Box::new(parser); let path = PathBuf::from("main.kt"); let code = include_str!("cases/kotlin/main.kt"); let symbols_str = include_str!("cases/kotlin/main.kt.json"); @@ -17,7 +20,8 @@ fn test_kotlin_main() { #[test] fn test_kotlin_person() { let parser = KotlinParser::new().unwrap(); - let mut boxed_parser: Box = Box::new(parser); + let mut boxed_parser: Box = + Box::new(parser); let path = PathBuf::from("person.kt"); let code = include_str!("cases/kotlin/person.kt"); let symbols_str = include_str!("cases/kotlin/person.kt.json"); @@ -27,27 +31,42 @@ fn test_kotlin_person() { #[test] fn test_kotlin_skeletonizer() { let parser = KotlinParser::new().unwrap(); 
- let mut boxed_parser: Box = Box::new(parser); + let mut boxed_parser: Box = + Box::new(parser); let path = PathBuf::from("person.kt"); let code = include_str!("cases/kotlin/person.kt"); let skeleton_ref_str = include_str!("cases/kotlin/person.kt.skeleton"); - base_skeletonizer_test(&LanguageId::Kotlin, &mut boxed_parser, &path, code, skeleton_ref_str); + base_skeletonizer_test( + &LanguageId::Kotlin, + &mut boxed_parser, + &path, + code, + skeleton_ref_str, + ); } #[test] fn test_kotlin_declaration_formatter() { let parser = KotlinParser::new().unwrap(); - let mut boxed_parser: Box = Box::new(parser); + let mut boxed_parser: Box = + Box::new(parser); let path = PathBuf::from("person.kt"); let code = include_str!("cases/kotlin/person.kt"); let decls_ref_str = include_str!("cases/kotlin/person.kt.decl_json"); - base_declaration_formatter_test(&LanguageId::Kotlin, &mut boxed_parser, &path, code, decls_ref_str); + base_declaration_formatter_test( + &LanguageId::Kotlin, + &mut boxed_parser, + &path, + code, + decls_ref_str, + ); } #[test] fn test_kotlin_lambda_properties() { let parser = KotlinParser::new().unwrap(); - let mut boxed_parser: Box = Box::new(parser); + let mut boxed_parser: Box = + Box::new(parser); let path = PathBuf::from("lambda_test.kt"); let code = r#" class TestClass { @@ -63,20 +82,23 @@ class TestClass { } "#; let symbols = boxed_parser.parse(code, &path); - + println!("Total symbols found: {}", symbols.len()); - + for (i, symbol) in symbols.iter().enumerate() { let sym = symbol.read(); println!("Symbol {}: {} - '{}'", i, sym.symbol_type(), sym.name()); - - if let Some(prop) = sym.as_any().downcast_ref::() { + + if let Some(prop) = sym + .as_any() + .downcast_ref::( + ) { println!(" -> Property type: {:?}", prop.type_); if let Some(inference) = &prop.type_.inference_info { println!(" -> Inference info: {}", inference); } } } - + assert!(symbols.len() > 0, "Expected some symbols to be parsed"); } diff --git a/refact-agent/engine/src/ast/treesitter/parsers/tests/python.rs b/refact-agent/engine/src/ast/treesitter/parsers/tests/python.rs index 9c996357a..59a622b3d 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/tests/python.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/tests/python.rs @@ -6,7 +6,9 @@ mod tests { use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::AstLanguageParser; use crate::ast::treesitter::parsers::python::PythonParser; - use crate::ast::treesitter::parsers::tests::{base_declaration_formatter_test, base_parser_test, base_skeletonizer_test}; + use crate::ast::treesitter::parsers::tests::{ + base_declaration_formatter_test, base_parser_test, base_skeletonizer_test, + }; const MAIN_PY_CODE: &str = include_str!("cases/python/main.py"); const CALCULATOR_PY_CODE: &str = include_str!("cases/python/calculator.py"); @@ -17,25 +19,48 @@ mod tests { #[test] #[ignore] fn parser_test() { - let mut parser: Box = Box::new(PythonParser::new().expect("PythonParser::new")); + let mut parser: Box = + Box::new(PythonParser::new().expect("PythonParser::new")); let path = PathBuf::from("file:///main.py"); base_parser_test(&mut parser, &path, MAIN_PY_CODE, MAIN_PY_SYMBOLS); } #[test] fn skeletonizer_test() { - let mut parser: Box = Box::new(PythonParser::new().expect("PythonParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/python/calculator.py"); + let mut parser: Box = + Box::new(PythonParser::new().expect("PythonParser::new")); + let file = 
canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/python/calculator.py"); assert!(file.exists()); - base_skeletonizer_test(&LanguageId::Python, &mut parser, &file, CALCULATOR_PY_CODE, CALCULATOR_PY_SKELETON); + base_skeletonizer_test( + &LanguageId::Python, + &mut parser, + &file, + CALCULATOR_PY_CODE, + CALCULATOR_PY_SKELETON, + ); } #[test] fn declaration_formatter_test() { - let mut parser: Box = Box::new(PythonParser::new().expect("PythonParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/python/calculator.py"); + let mut parser: Box = + Box::new(PythonParser::new().expect("PythonParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/python/calculator.py"); assert!(file.exists()); - base_declaration_formatter_test(&LanguageId::Python, &mut parser, &file, CALCULATOR_PY_CODE, CALCULATOR_PY_DECLS); + base_declaration_formatter_test( + &LanguageId::Python, + &mut parser, + &file, + CALCULATOR_PY_CODE, + CALCULATOR_PY_DECLS, + ); } } diff --git a/refact-agent/engine/src/ast/treesitter/parsers/tests/rust.rs b/refact-agent/engine/src/ast/treesitter/parsers/tests/rust.rs index f98f90791..65bf88094 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/tests/rust.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/tests/rust.rs @@ -6,7 +6,9 @@ mod tests { use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::AstLanguageParser; use crate::ast::treesitter::parsers::rust::RustParser; - use crate::ast::treesitter::parsers::tests::{base_declaration_formatter_test, base_parser_test, base_skeletonizer_test}; + use crate::ast::treesitter::parsers::tests::{ + base_declaration_formatter_test, base_parser_test, base_skeletonizer_test, + }; const MAIN_RS_CODE: &str = include_str!("cases/rust/main.rs"); const MAIN_RS_SYMBOLS: &str = include_str!("cases/rust/main.rs.json"); @@ -17,25 +19,48 @@ mod tests { #[test] fn parser_test() { - let mut parser: Box = Box::new(RustParser::new().expect("RustParser::new")); + let mut parser: Box = + Box::new(RustParser::new().expect("RustParser::new")); let path = PathBuf::from("file:///main.rs"); base_parser_test(&mut parser, &path, MAIN_RS_CODE, MAIN_RS_SYMBOLS); } #[test] fn skeletonizer_test() { - let mut parser: Box = Box::new(RustParser::new().expect("RustParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/rust/point.rs"); + let mut parser: Box = + Box::new(RustParser::new().expect("RustParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/rust/point.rs"); assert!(file.exists()); - base_skeletonizer_test(&LanguageId::Rust, &mut parser, &file, POINT_RS_CODE, POINT_RS_SKELETON); + base_skeletonizer_test( + &LanguageId::Rust, + &mut parser, + &file, + POINT_RS_CODE, + POINT_RS_SKELETON, + ); } #[test] fn declaration_formatter_test() { - let mut parser: Box = Box::new(RustParser::new().expect("RustParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/rust/point.rs"); + let mut parser: Box = + Box::new(RustParser::new().expect("RustParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/rust/point.rs"); assert!(file.exists()); - base_declaration_formatter_test(&LanguageId::Rust, &mut parser, &file, POINT_RS_CODE, POINT_RS_DECLS); + 
base_declaration_formatter_test( + &LanguageId::Rust, + &mut parser, + &file, + POINT_RS_CODE, + POINT_RS_DECLS, + ); } } diff --git a/refact-agent/engine/src/ast/treesitter/parsers/tests/ts.rs b/refact-agent/engine/src/ast/treesitter/parsers/tests/ts.rs index b19421ebf..7c34397ac 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/tests/ts.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/tests/ts.rs @@ -5,7 +5,9 @@ mod tests { use crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::AstLanguageParser; - use crate::ast::treesitter::parsers::tests::{base_declaration_formatter_test, base_parser_test, base_skeletonizer_test}; + use crate::ast::treesitter::parsers::tests::{ + base_declaration_formatter_test, base_parser_test, base_skeletonizer_test, + }; use crate::ast::treesitter::parsers::ts::TSParser; const MAIN_TS_CODE: &str = include_str!("cases/ts/main.ts"); @@ -17,25 +19,48 @@ mod tests { #[test] fn parser_test() { - let mut parser: Box = Box::new(TSParser::new().expect("TSParser::new")); + let mut parser: Box = + Box::new(TSParser::new().expect("TSParser::new")); let path = PathBuf::from("file:///main.ts"); base_parser_test(&mut parser, &path, MAIN_TS_CODE, MAIN_TS_SYMBOLS); } #[test] fn skeletonizer_test() { - let mut parser: Box = Box::new(TSParser::new().expect("TSParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/ts/person.ts"); + let mut parser: Box = + Box::new(TSParser::new().expect("TSParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/ts/person.ts"); assert!(file.exists()); - base_skeletonizer_test(&LanguageId::TypeScript, &mut parser, &file, PERSON_TS_CODE, PERSON_TS_SKELETON); + base_skeletonizer_test( + &LanguageId::TypeScript, + &mut parser, + &file, + PERSON_TS_CODE, + PERSON_TS_SKELETON, + ); } #[test] fn declaration_formatter_test() { - let mut parser: Box = Box::new(TSParser::new().expect("TSParser::new")); - let file = canonicalize(PathBuf::from(file!())).unwrap().parent().unwrap().join("cases/ts/person.ts"); + let mut parser: Box = + Box::new(TSParser::new().expect("TSParser::new")); + let file = canonicalize(PathBuf::from(file!())) + .unwrap() + .parent() + .unwrap() + .join("cases/ts/person.ts"); assert!(file.exists()); - base_declaration_formatter_test(&LanguageId::TypeScript, &mut parser, &file, PERSON_TS_CODE, PERSON_TS_DECLS); + base_declaration_formatter_test( + &LanguageId::TypeScript, + &mut parser, + &file, + PERSON_TS_CODE, + PERSON_TS_DECLS, + ); } } diff --git a/refact-agent/engine/src/ast/treesitter/parsers/ts.rs b/refact-agent/engine/src/ast/treesitter/parsers/ts.rs index 6f29abec0..08ce7a368 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/ts.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/ts.rs @@ -10,7 +10,11 @@ use similar::DiffableStr; use tree_sitter::{Node, Parser, Range}; use uuid::Uuid; -use crate::ast::treesitter::ast_instance_structs::{AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, VariableDefinition, VariableUsage}; +use crate::ast::treesitter::ast_instance_structs::{ + AstSymbolFields, AstSymbolInstanceArc, ClassFieldDeclaration, CommentDefinition, FunctionArg, + FunctionCall, FunctionDeclaration, ImportDeclaration, ImportType, StructDeclaration, TypeDef, + VariableDefinition, VariableUsage, +}; use 
crate::ast::treesitter::language_id::LanguageId; use crate::ast::treesitter::parsers::{AstLanguageParser, internal_error, ParserError}; use crate::ast::treesitter::parsers::utils::{CandidateInfo, get_guid}; @@ -142,8 +146,8 @@ impl TSParser { &mut self, info: &CandidateInfo<'a>, code: &str, - candidates: &mut VecDeque>) - -> Vec { + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = StructDeclaration::default(); @@ -154,7 +158,12 @@ impl TSParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); @@ -165,7 +174,12 @@ impl TSParser { if let Some(type_parameters) = info.node.child_by_field_name("type_parameters") { for i in 0..type_parameters.child_count() { let child = type_parameters.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(dtype) = parse_type(&child, code) { decl.template_types.push(dtype); } @@ -175,18 +189,27 @@ impl TSParser { // find base classes for i in 0..info.node.child_count() { let class_heritage = info.node.child(i).unwrap(); - symbols.extend(self.find_error_usages(&class_heritage, code, &info.ast_fields.file_path, - &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &class_heritage, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if class_heritage.kind() == "class_heritage" { - for i in 0..class_heritage.child_count() { let extends_clause = class_heritage.child(i).unwrap(); - symbols.extend(self.find_error_usages(&extends_clause, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &extends_clause, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if extends_clause.kind() == "extends_clause" { let mut current_dtype: Option = None; for i in 0..extends_clause.child_count() { let child = extends_clause.child(i).unwrap(); - if let Some(field_name) = extends_clause.field_name_for_child(i as u32) { + if let Some(field_name) = extends_clause.field_name_for_child(i as u32) + { match field_name { "value" => { if let Some(current_dtype) = ¤t_dtype { @@ -199,9 +222,15 @@ impl TSParser { "type_arguments" => { for i in 0..child.child_count() { let child = child.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(dtype) = parse_type(&child, code) { - if let Some(current_dtype) = current_dtype.as_mut() { + if let Some(current_dtype) = current_dtype.as_mut() + { current_dtype.nested_types.push(dtype); } } @@ -248,9 +277,19 @@ impl TSParser { symbols } - fn parse_variable_definition<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_variable_definition<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = 
vec![]; - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); let mut decl = VariableDefinition::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -281,7 +320,12 @@ impl TSParser { symbols } - fn parse_field_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, _: &mut VecDeque>) -> Vec { + fn parse_field_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + _: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut decl = ClassFieldDeclaration::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -303,7 +347,12 @@ impl TSParser { symbols } - fn parse_enum_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_enum_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let mut decl = StructDeclaration::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -311,7 +360,12 @@ impl TSParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &decl.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &decl.ast_fields.file_path, + &info.parent_guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); @@ -332,7 +386,8 @@ impl TSParser { field.ast_fields.name = code.slice(name.byte_range()).to_string(); } if let Some(value) = child.child_by_field_name("value") { - field.type_.inference_info = Some(code.slice(value.byte_range()).to_string()); + field.type_.inference_info = + Some(code.slice(value.byte_range()).to_string()); } symbols.push(Arc::new(RwLock::new(Box::new(field)))); } @@ -360,7 +415,12 @@ impl TSParser { symbols } - pub fn parse_function_declaration<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + pub fn parse_function_declaration<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionDeclaration::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -370,7 +430,12 @@ impl TSParser { decl.ast_fields.parent_guid = Some(info.parent_guid.clone()); decl.ast_fields.guid = get_guid(); - symbols.extend(self.find_error_usages(&info.node, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(name) = info.node.child_by_field_name("name") { decl.ast_fields.name = code.slice(name.byte_range()).to_string(); @@ -379,7 +444,12 @@ impl TSParser { if let Some(type_parameters) = info.node.child_by_field_name("type_parameters") { for i in 0..type_parameters.child_count() { let child = type_parameters.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); if let Some(dtype) = parse_type(&child, code) { 
decl.template_types.push(dtype); } @@ -393,10 +463,20 @@ impl TSParser { start_point: decl.ast_fields.full_range.start_point, end_point: parameters.end_position(), }; - symbols.extend(self.find_error_usages(¶meters, code, &decl.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + ¶meters, + code, + &decl.ast_fields.file_path, + &decl.ast_fields.guid, + )); for i in 0..parameters.child_count() { let child = parameters.child(i).unwrap(); - symbols.extend(self.find_error_usages(&child, code, &info.ast_fields.file_path, &decl.ast_fields.guid)); + symbols.extend(self.find_error_usages( + &child, + code, + &info.ast_fields.file_path, + &decl.ast_fields.guid, + )); match child.kind() { "optional_parameter" | "required_parameter" => { let mut arg = FunctionArg::default(); @@ -408,10 +488,12 @@ impl TSParser { } if let Some(value) = child.child_by_field_name("value") { if let Some(dtype) = arg.type_.as_mut() { - dtype.inference_info = Some(code.slice(value.byte_range()).to_string()); + dtype.inference_info = + Some(code.slice(value.byte_range()).to_string()); } else { let mut dtype = TypeDef::default(); - dtype.inference_info = Some(code.slice(value.byte_range()).to_string()); + dtype.inference_info = + Some(code.slice(value.byte_range()).to_string()); arg.type_ = Some(dtype); } } @@ -460,8 +542,8 @@ impl TSParser { &mut self, info: &CandidateInfo<'a>, code: &str, - candidates: &mut VecDeque>) - -> Vec { + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = Default::default(); let mut decl = FunctionCall::default(); decl.ast_fields = AstSymbolFields::from_fields(&info.ast_fields); @@ -473,7 +555,12 @@ impl TSParser { } decl.ast_fields.caller_guid = Some(get_guid()); - symbols.extend(self.find_error_usages(&info.node, code, &info.ast_fields.file_path, &info.parent_guid)); + symbols.extend(self.find_error_usages( + &info.node, + code, + &info.ast_fields.file_path, + &info.parent_guid, + )); if let Some(function) = info.node.child_by_field_name("function") { let kind = function.kind(); @@ -532,7 +619,13 @@ impl TSParser { symbols } - fn find_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn find_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); for i in 0..parent.child_count() { let child = parent.child(i).unwrap(); @@ -543,7 +636,13 @@ impl TSParser { symbols } - fn parse_error_usages(&mut self, parent: &Node, code: &str, path: &PathBuf, parent_guid: &Uuid) -> Vec { + fn parse_error_usages( + &mut self, + parent: &Node, + code: &str, + path: &PathBuf, + parent_guid: &Uuid, + ) -> Vec { let mut symbols: Vec = Default::default(); match parent.kind() { "identifier" /*| "field_identifier"*/ => { @@ -591,13 +690,18 @@ impl TSParser { symbols } - fn parse_usages_<'a>(&mut self, info: &CandidateInfo<'a>, code: &str, candidates: &mut VecDeque>) -> Vec { + fn parse_usages_<'a>( + &mut self, + info: &CandidateInfo<'a>, + code: &str, + candidates: &mut VecDeque>, + ) -> Vec { let mut symbols: Vec = vec![]; let kind = info.node.kind(); #[cfg(test)] #[allow(unused)] - let text = code.slice(info.node.byte_range()); + let text = code.slice(info.node.byte_range()); match kind { "class_declaration" | "class" | "interface_declaration" | "type_alias_declaration" => { symbols.extend(self.parse_struct_declaration(info, code, candidates)); @@ -791,8 +895,10 @@ impl TSParser { let symbols_l = self.parse_usages_(&candidate, 
code, &mut candidates); symbols.extend(symbols_l); } - let guid_to_symbol_map = symbols.iter() - .map(|s| (s.clone().read().guid().clone(), s.clone())).collect::>(); + let guid_to_symbol_map = symbols + .iter() + .map(|s| (s.clone().read().guid().clone(), s.clone())) + .collect::>(); for symbol in symbols.iter_mut() { let guid = symbol.read().guid().clone(); if let Some(parent_guid) = symbol.read().parent_guid() { @@ -806,10 +912,20 @@ impl TSParser { { for symbol in symbols.iter_mut() { let mut sym = symbol.write(); - sym.fields_mut().childs_guid = sym.fields_mut().childs_guid.iter() + sym.fields_mut().childs_guid = sym + .fields_mut() + .childs_guid + .iter() .sorted_by_key(|x| { - guid_to_symbol_map.get(*x).unwrap().read().full_range().start_byte - }).map(|x| x.clone()).collect(); + guid_to_symbol_map + .get(*x) + .unwrap() + .read() + .full_range() + .start_byte + }) + .map(|x| x.clone()) + .collect(); } } @@ -824,5 +940,3 @@ impl AstLanguageParser for TSParser { symbols } } - - diff --git a/refact-agent/engine/src/ast/treesitter/parsers/utils.rs b/refact-agent/engine/src/ast/treesitter/parsers/utils.rs index ff85f06c9..24a409a2f 100644 --- a/refact-agent/engine/src/ast/treesitter/parsers/utils.rs +++ b/refact-agent/engine/src/ast/treesitter/parsers/utils.rs @@ -7,7 +7,10 @@ pub(crate) fn get_guid() -> Uuid { Uuid::new_v4() } -pub(crate) fn get_children_guids(parent_guid: &Uuid, children: &Vec) -> Vec { +pub(crate) fn get_children_guids( + parent_guid: &Uuid, + children: &Vec, +) -> Vec { let mut result = Vec::new(); for child in children { let child_ref = child.read(); @@ -20,7 +23,6 @@ pub(crate) fn get_children_guids(parent_guid: &Uuid, children: &Vec { pub ast_fields: AstSymbolFields, pub node: Node<'a>, diff --git a/refact-agent/engine/src/ast/treesitter/skeletonizer.rs b/refact-agent/engine/src/ast/treesitter/skeletonizer.rs index 8d0b26cc6..a0dfb1f37 100644 --- a/refact-agent/engine/src/ast/treesitter/skeletonizer.rs +++ b/refact-agent/engine/src/ast/treesitter/skeletonizer.rs @@ -10,12 +10,16 @@ use crate::ast::treesitter::structs::SymbolType; struct BaseSkeletonFormatter; pub trait SkeletonFormatter { - fn make_skeleton(&self, - symbol: &SymbolInformation, - text: &String, - guid_to_children: &HashMap>, - guid_to_info: &HashMap) -> String { - let mut res_line = symbol.get_declaration_content(text).unwrap() + fn make_skeleton( + &self, + symbol: &SymbolInformation, + text: &String, + guid_to_children: &HashMap>, + guid_to_info: &HashMap, + ) -> String { + let mut res_line = symbol + .get_declaration_content(text) + .unwrap() .split("\n") .map(|x| x.trim_start().trim_end().to_string()) .collect::>(); @@ -30,7 +34,9 @@ pub trait SkeletonFormatter { let child_symbol = guid_to_info.get(&child).unwrap(); match child_symbol.symbol_type { SymbolType::FunctionDeclaration | SymbolType::ClassFieldDeclaration => { - let mut content = child_symbol.get_declaration_content(text).unwrap() + let mut content = child_symbol + .get_declaration_content(text) + .unwrap() .split("\n") .map(|x| x.trim_start().trim_end().to_string()) .collect::>(); @@ -58,34 +64,55 @@ pub trait SkeletonFormatter { if content.is_empty() { return vec![]; } - let lines = content.iter() - .map(|x| x.replace("\r", "") - .replace("\t", " ").to_string()) + let lines = content + .iter() + .map(|x| x.replace("\r", "").replace("\t", " ").to_string()) .collect::>(); - let indent_n = content.iter().map(|x| { - if x.is_empty() { - return usize::MAX; - } else { - x.len() - x.trim_start().len() - } - }).min().unwrap_or(0); + let 
indent_n = content + .iter() + .map(|x| { + if x.is_empty() { + return usize::MAX; + } else { + x.len() - x.trim_start().len() + } + }) + .min() + .unwrap_or(0); let intent = " ".repeat(indent_n).to_string(); - lines.iter().map(|x| if x.starts_with(&intent) { - x[indent_n..x.len()].to_string() - } else {x.to_string()}).collect::>() + lines + .iter() + .map(|x| { + if x.starts_with(&intent) { + x[indent_n..x.len()].to_string() + } else { + x.to_string() + } + }) + .collect::>() } - fn get_declaration_with_comments(&self, - symbol: &SymbolInformation, - text: &String, - _guid_to_children: &HashMap>, - guid_to_info: &HashMap) -> (String, (usize, usize)) { + fn get_declaration_with_comments( + &self, + symbol: &SymbolInformation, + text: &String, + _guid_to_children: &HashMap>, + guid_to_info: &HashMap, + ) -> (String, (usize, usize)) { let mut res_line: VecDeque = Default::default(); let mut top_row = symbol.full_range.start_point.row; - let mut all_top_syms = guid_to_info.values().filter(|info| info.full_range.start_point.row < top_row).collect::>(); + let mut all_top_syms = guid_to_info + .values() + .filter(|info| info.full_range.start_point.row < top_row) + .collect::>(); // reverse sort - all_top_syms.sort_by(|a, b| b.full_range.start_point.row.cmp(&a.full_range.start_point.row)); + all_top_syms.sort_by(|a, b| { + b.full_range + .start_point + .row + .cmp(&a.full_range.start_point.row) + }); let mut need_syms: Vec<&&SymbolInformation> = vec![]; { @@ -94,20 +121,25 @@ pub trait SkeletonFormatter { if sym.symbol_type != SymbolType::CommentDefinition { break; } - let all_sym_on_this_line = all_top_syms.iter() - .filter(|info| - info.full_range.start_point.row == sym.full_range.start_point.row || - info.full_range.end_point.row == sym.full_range.start_point.row).collect::>(); + let all_sym_on_this_line = all_top_syms + .iter() + .filter(|info| { + info.full_range.start_point.row == sym.full_range.start_point.row + || info.full_range.end_point.row == sym.full_range.start_point.row + }) + .collect::>(); - if all_sym_on_this_line.iter().all(|info| info.symbol_type == SymbolType::CommentDefinition) { + if all_sym_on_this_line + .iter() + .all(|info| info.symbol_type == SymbolType::CommentDefinition) + { need_syms.push(sym); } else { - break + break; } } } - for sym in need_syms { if sym.symbol_type != SymbolType::CommentDefinition { break; @@ -118,9 +150,7 @@ pub trait SkeletonFormatter { content.pop(); } let lines = content.split("\n").collect::>(); - let lines = lines.iter() - .map(|x| x.to_string()) - .collect::>(); + let lines = lines.iter().map(|x| x.to_string()).collect::>(); lines.into_iter().rev().for_each(|x| res_line.push_front(x)); } @@ -129,7 +159,10 @@ pub trait SkeletonFormatter { if res_line.is_empty() { return ("".to_string(), (top_row, bottom_row)); } - let mut content = symbol.get_declaration_content(text).unwrap().split("\n") + let mut content = symbol + .get_declaration_content(text) + .unwrap() + .split("\n") .map(|x| x.trim_end().to_string()) .collect::>(); if let Some(last) = content.last_mut() { @@ -139,7 +172,10 @@ pub trait SkeletonFormatter { } res_line.extend(content.into_iter()); } else if symbol.symbol_type == SymbolType::FunctionDeclaration { - let content = symbol.get_content(text).unwrap().split("\n") + let content = symbol + .get_content(text) + .unwrap() + .split("\n") .map(|x| x.to_string()) .collect::>(); res_line.extend(content.into_iter()); @@ -156,6 +192,6 @@ impl SkeletonFormatter for BaseSkeletonFormatter {} pub fn make_formatter(language_id: 
&LanguageId) -> Box { match language_id { LanguageId::Python => Box::new(PythonSkeletonFormatter {}), - _ => Box::new(BaseSkeletonFormatter {}) + _ => Box::new(BaseSkeletonFormatter {}), } } diff --git a/refact-agent/engine/src/ast/treesitter/structs.rs b/refact-agent/engine/src/ast/treesitter/structs.rs index 23fe4a3b3..a28054468 100644 --- a/refact-agent/engine/src/ast/treesitter/structs.rs +++ b/refact-agent/engine/src/ast/treesitter/structs.rs @@ -57,7 +57,7 @@ impl FromStr for SymbolType { "comment_definition" => SymbolType::CommentDefinition, "function_call" => SymbolType::FunctionCall, "variable_usage" => SymbolType::VariableUsage, - _ => SymbolType::Unknown + _ => SymbolType::Unknown, }); } } diff --git a/refact-agent/engine/src/at_commands/at_ast_definition.rs b/refact-agent/engine/src/at_commands/at_ast_definition.rs index ae34b7c5b..6bbd3dc61 100644 --- a/refact-agent/engine/src/at_commands/at_ast_definition.rs +++ b/refact-agent/engine/src/at_commands/at_ast_definition.rs @@ -9,7 +9,6 @@ use crate::at_commands::execute_at::{AtCommandMember, correct_at_arg}; use crate::custom_error::trace_and_default; // use strsim::jaro_winkler; - #[derive(Debug)] pub struct AtParamSymbolPathQuery; @@ -44,20 +43,14 @@ pub struct AtAstDefinition { impl AtAstDefinition { pub fn new() -> Self { AtAstDefinition { - params: vec![ - Box::new(AtParamSymbolPathQuery::new()) - ], + params: vec![Box::new(AtParamSymbolPathQuery::new())], } } } #[async_trait] impl AtParam for AtParamSymbolPathQuery { - async fn is_value_valid( - &self, - _ccx: Arc>, - value: &String, - ) -> bool { + async fn is_value_valid(&self, _ccx: Arc>, value: &String) -> bool { !value.is_empty() } @@ -80,7 +73,9 @@ impl AtParam for AtParamSymbolPathQuery { } let ast_index = ast_service_opt.unwrap().lock().await.ast_index.clone(); - definition_paths_fuzzy(ast_index, value, top_n, 1000).await.unwrap_or_else(trace_and_default) + definition_paths_fuzzy(ast_index, value, top_n, 1000) + .await + .unwrap_or_else(trace_and_default) } fn param_completion_valid(&self) -> bool { @@ -107,7 +102,7 @@ impl AtCommand for AtAstDefinition { cmd.reason = Some("parameter is missing".to_string()); args.clear(); return Err("parameter `symbol` is missing".to_string()); - }, + } }; correct_at_arg(ccx.clone(), &self.params[0], &mut arg_symbol).await; @@ -118,18 +113,26 @@ impl AtCommand for AtAstDefinition { let ast_service_opt = gcx.read().await.ast_service.clone(); if let Some(ast_service) = ast_service_opt { let ast_index = ast_service.lock().await.ast_index.clone(); - let defs: Vec> = crate::ast::ast_db::definitions(ast_index, arg_symbol.text.as_str())?; + let defs: Vec> = + crate::ast::ast_db::definitions(ast_index, arg_symbol.text.as_str())?; let file_paths = defs.iter().map(|x| x.cpath.clone()).collect::>(); - let short_file_paths = crate::files_correction::shortify_paths(gcx.clone(), &file_paths).await; + let short_file_paths = + crate::files_correction::shortify_paths(gcx.clone(), &file_paths).await; let text = if let Some(path0) = short_file_paths.get(0) { if short_file_paths.len() > 1 { - format!("`{}` (defined in {} and other files)", &arg_symbol.text, path0) + format!( + "`{}` (defined in {} and other files)", + &arg_symbol.text, path0 + ) } else { format!("`{}` (defined in {})", &arg_symbol.text, path0) } } else { - format!("`{}` (definition not found in the AST tree)", &arg_symbol.text) + format!( + "`{}` (definition not found in the AST tree)", + &arg_symbol.text + ) }; let mut result = vec![]; @@ -139,13 +142,20 @@ impl AtCommand for 
AtAstDefinition { file_content: "".to_string(), line1: res.full_line1(), line2: res.full_line2(), + file_rev: None, symbols: vec![res.path_drop0()], gradient_type: 4, usefulness: 100.0, skip_pp: false, }); } - Ok((result.into_iter().map(|x| ContextEnum::ContextFile(x)).collect::>(), text)) + Ok(( + result + .into_iter() + .map(|x| ContextEnum::ContextFile(x)) + .collect::>(), + text, + )) } else { Err("attempt to use @definition with no ast turned on".to_string()) } diff --git a/refact-agent/engine/src/at_commands/at_ast_reference.rs b/refact-agent/engine/src/at_commands/at_ast_reference.rs deleted file mode 100644 index a64ff0410..000000000 --- a/refact-agent/engine/src/at_commands/at_ast_reference.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::sync::Arc; - -use async_trait::async_trait; -use tokio::sync::Mutex as AMutex; - -use crate::at_commands::at_commands::{AtCommand, AtCommandsContext, AtParam}; -use crate::call_validation::{ContextFile, ContextEnum}; -use crate::at_commands::execute_at::{AtCommandMember, correct_at_arg}; -use crate::at_commands::at_ast_definition::AtParamSymbolPathQuery; -use crate::custom_error::trace_and_default; - - -pub struct AtAstReference { - pub params: Vec>, -} - -impl AtAstReference { - pub fn new() -> Self { - AtAstReference { - params: vec![ - Box::new(AtParamSymbolPathQuery::new()) - ], - } - } -} - - -#[async_trait] -impl AtCommand for AtAstReference { - fn params(&self) -> &Vec> { - &self.params - } - - async fn at_execute( - &self, - ccx: Arc>, - cmd: &mut AtCommandMember, - args: &mut Vec, - ) -> Result<(Vec, String), String> { - let mut arg_symbol = match args.get(0) { - Some(x) => x.clone(), - None => { - cmd.ok = false; - cmd.reason = Some("no symbol path".to_string()); - args.clear(); - return Err("no symbol path".to_string()); - }, - }; - - correct_at_arg(ccx.clone(), &self.params[0], &mut arg_symbol).await; - args.clear(); - args.push(arg_symbol.clone()); - - let gcx = ccx.lock().await.global_context.clone(); - let ast_service_opt = gcx.read().await.ast_service.clone(); - - if let Some(ast_service) = ast_service_opt { - let ast_index = ast_service.lock().await.ast_index.clone(); - let defs = crate::ast::ast_db::definitions(ast_index.clone(), arg_symbol.text.as_str()) - .unwrap_or_else(trace_and_default); - let mut all_results = vec![]; - let mut messages = vec![]; - - const USAGES_LIMIT: usize = 20; - - if let Some(def) = defs.get(0) { - let usages: Vec<(Arc, usize)> = crate::ast::ast_db::usages( - ast_index.clone(), - def.path(), - 100, - ).unwrap_or_else(trace_and_default); - let usage_count = usages.len(); - - let text = format!( - "symbol `{}` has {} usages", - arg_symbol.text, - usage_count - ); - messages.push(text); - - for (usedin, uline) in usages.iter().take(USAGES_LIMIT) { - all_results.push(ContextFile { - file_name: usedin.cpath.clone(), - file_content: "".to_string(), - line1: *uline, - line2: *uline, - symbols: vec![usedin.path_drop0()], - gradient_type: 4, - usefulness: 100.0, - skip_pp: false, - }); - } - if usage_count > USAGES_LIMIT { - messages.push(format!("...and {} more usages", usage_count - USAGES_LIMIT)); - } - } else { - messages.push("No definitions found for the symbol".to_string()); - } - - Ok((all_results.into_iter().map(|x| ContextEnum::ContextFile(x)).collect::>(), messages.join("\n"))) - } else { - Err("attempt to use @references with no ast turned on".to_string()) - } - } - - fn depends_on(&self) -> Vec { - vec!["ast".to_string()] - } -} diff --git a/refact-agent/engine/src/at_commands/at_commands.rs 
b/refact-agent/engine/src/at_commands/at_commands.rs index fdd0b46e7..5da15602d 100644 --- a/refact-agent/engine/src/at_commands/at_commands.rs +++ b/refact-agent/engine/src/at_commands/at_commands.rs @@ -1,22 +1,26 @@ use indexmap::IndexMap; use std::collections::HashMap; use std::sync::Arc; +use std::sync::atomic::AtomicBool; use tokio::sync::mpsc; use async_trait::async_trait; use tokio::sync::Mutex as AMutex; use tokio::sync::RwLock as ARwLock; -use crate::call_validation::{ChatMessage, ContextFile, ContextEnum, SubchatParameters, PostprocessSettings}; +use crate::call_validation::{ + ChatMessage, ContextFile, ContextEnum, SubchatParameters, PostprocessSettings, +}; +use crate::chat::types::TaskMeta; use crate::global_context::GlobalContext; use crate::at_commands::at_file::AtFile; use crate::at_commands::at_ast_definition::AtAstDefinition; -use crate::at_commands::at_ast_reference::AtAstReference; use crate::at_commands::at_tree::AtTree; use crate::at_commands::at_web::AtWeb; use crate::at_commands::execute_at::AtCommandMember; +pub const MAX_SUBCHAT_DEPTH: usize = 5; pub struct AtCommandsContext { pub global_context: Arc>, @@ -27,17 +31,21 @@ pub struct AtCommandsContext { #[allow(dead_code)] pub is_preview: bool, pub pp_skeleton: bool, - pub correction_only_up_to_step: usize, // suppresses context_file messages, writes a correction message instead + #[allow(dead_code)] + pub correction_only_up_to_step: usize, pub chat_id: String, + pub root_chat_id: String, pub current_model: String, - pub should_execute_remotely: bool, + pub task_meta: Option, + pub subchat_depth: usize, - pub at_commands: HashMap>, // a copy from static constant + pub at_commands: HashMap>, pub subchat_tool_parameters: IndexMap, pub postprocess_parameters: PostprocessSettings, - pub subchat_tx: Arc>>, // one and only supported format for now {"tool_call_id": xx, "subchat_id": xx, "add_message": {...}} + pub subchat_tx: Arc>>, pub subchat_rx: Arc>>, + pub abort_flag: Arc, } impl AtCommandsContext { @@ -48,29 +56,59 @@ impl AtCommandsContext { is_preview: bool, messages: Vec, chat_id: String, - should_execute_remotely: bool, + root_chat_id: Option, current_model: String, + task_meta: Option, + ) -> Self { + Self::new_with_abort( + global_context, + n_ctx, + top_n, + is_preview, + messages, + chat_id, + root_chat_id, + current_model, + task_meta, + None, + ) + .await + } + + pub async fn new_with_abort( + global_context: Arc>, + n_ctx: usize, + top_n: usize, + is_preview: bool, + messages: Vec, + chat_id: String, + root_chat_id: Option, + current_model: String, + task_meta: Option, + abort_flag: Option>, ) -> Self { let (tx, rx) = mpsc::unbounded_channel::(); + let effective_root = root_chat_id.unwrap_or_else(|| chat_id.clone()); AtCommandsContext { global_context: global_context.clone(), n_ctx, top_n, - tokens_for_rag: 0, + tokens_for_rag: (n_ctx / 4).max(64).min(n_ctx), messages, is_preview, pp_skeleton: true, correction_only_up_to_step: 0, chat_id, + root_chat_id: effective_root, current_model, - should_execute_remotely, - + task_meta, + subchat_depth: 0, at_commands: at_commands_dict(global_context.clone()).await, subchat_tool_parameters: IndexMap::new(), postprocess_parameters: PostprocessSettings::new(), - subchat_tx: Arc::new(AMutex::new(tx)), subchat_rx: Arc::new(AMutex::new(rx)), + abort_flag: abort_flag.unwrap_or_else(|| Arc::new(AtomicBool::new(false))), } } } @@ -79,36 +117,73 @@ impl AtCommandsContext { pub trait AtCommand: Send + Sync { fn params(&self) -> &Vec>; // returns (messages_for_postprocessing, 
text_on_clip) - async fn at_execute(&self, ccx: Arc>, cmd: &mut AtCommandMember, args: &mut Vec) -> Result<(Vec, String), String>; - fn depends_on(&self) -> Vec { vec![] } // "ast", "vecdb" + async fn at_execute( + &self, + ccx: Arc>, + cmd: &mut AtCommandMember, + args: &mut Vec, + ) -> Result<(Vec, String), String>; + fn depends_on(&self) -> Vec { + vec![] + } // "ast", "vecdb" } #[async_trait] pub trait AtParam: Send + Sync { async fn is_value_valid(&self, ccx: Arc>, value: &String) -> bool; - async fn param_completion(&self, ccx: Arc>, value: &String) -> Vec; - fn param_completion_valid(&self) -> bool {false} + async fn param_completion( + &self, + ccx: Arc>, + value: &String, + ) -> Vec; + fn param_completion_valid(&self) -> bool { + false + } } -pub async fn at_commands_dict(gcx: Arc>) -> HashMap> { +pub async fn at_commands_dict( + gcx: Arc>, +) -> HashMap> { let at_commands_dict = HashMap::from([ - ("@file".to_string(), Arc::new(AtFile::new()) as Arc), + ( + "@file".to_string(), + Arc::new(AtFile::new()) as Arc, + ), // ("@file-search".to_string(), Arc::new(AtFileSearch::new()) as Arc), - ("@definition".to_string(), Arc::new(AtAstDefinition::new()) as Arc), - ("@references".to_string(), Arc::new(AtAstReference::new()) as Arc), + ( + "@definition".to_string(), + Arc::new(AtAstDefinition::new()) as Arc, + ), // ("@local-notes-to-self".to_string(), Arc::new(AtLocalNotesToSelf::new()) as Arc), - ("@tree".to_string(), Arc::new(AtTree::new()) as Arc), + ( + "@tree".to_string(), + Arc::new(AtTree::new()) as Arc, + ), // ("@diff".to_string(), Arc::new(AtDiff::new()) as Arc), // ("@diff-rev".to_string(), Arc::new(AtDiffRev::new()) as Arc), - ("@web".to_string(), Arc::new(AtWeb::new()) as Arc), - ("@search".to_string(), Arc::new(crate::at_commands::at_search::AtSearch::new()) as Arc), - ("@knowledge-load".to_string(), Arc::new(crate::at_commands::at_knowledge::AtLoadKnowledge::new()) as Arc), + ( + "@web".to_string(), + Arc::new(AtWeb::new()) as Arc, + ), + ( + "@search".to_string(), + Arc::new(crate::at_commands::at_search::AtSearch::new()) as Arc, + ), + ( + "@knowledge-load".to_string(), + Arc::new(crate::at_commands::at_knowledge::AtLoadKnowledge::new()) + as Arc, + ), ]); let (ast_on, vecdb_on, active_group_id) = { let gcx_locked = gcx.read().await; let vecdb_on = gcx_locked.vec_db.lock().await.is_some(); - (gcx_locked.ast_service.is_some(), vecdb_on, gcx_locked.active_group_id.clone()) + ( + gcx_locked.ast_service.is_some(), + vecdb_on, + gcx_locked.active_group_id.clone(), + ) }; let allow_knowledge = active_group_id.is_some(); let mut result = HashMap::new(); @@ -130,13 +205,20 @@ pub async fn at_commands_dict(gcx: Arc>) -> HashMap) -> Vec { - x.into_iter().map(|i|ContextEnum::ContextFile(i)).collect::>() + x.into_iter() + .map(|i| ContextEnum::ContextFile(i)) + .collect::>() } pub fn filter_only_context_file_from_context_tool(tools: &Vec) -> Vec { - tools.iter() + tools + .iter() .filter_map(|x| { - if let ContextEnum::ContextFile(data) = x { Some(data.clone()) } else { None } - }).collect::>() + if let ContextEnum::ContextFile(data) = x { + Some(data.clone()) + } else { + None + } + }) + .collect::>() } - diff --git a/refact-agent/engine/src/at_commands/at_file.rs b/refact-agent/engine/src/at_commands/at_file.rs index e37c34ec3..d893d5b86 100644 --- a/refact-agent/engine/src/at_commands/at_file.rs +++ b/refact-agent/engine/src/at_commands/at_file.rs @@ -4,13 +4,53 @@ use regex::Regex; use tokio::sync::{Mutex as AMutex, RwLock as ARwLock}; use std::sync::Arc; -use 
crate::at_commands::at_commands::{AtCommand, AtCommandsContext, AtParam, vec_context_file_to_context_tools}; +use crate::at_commands::at_commands::{ + AtCommand, AtCommandsContext, AtParam, vec_context_file_to_context_tools, +}; use crate::at_commands::execute_at::{AtCommandMember, correct_at_arg}; use crate::files_in_workspace::get_file_text_from_memory_or_disk; use crate::call_validation::{ContextFile, ContextEnum}; -use crate::files_correction::{correct_to_nearest_filename, correct_to_nearest_dir_path, shortify_paths, get_project_dirs}; +use crate::files_correction::{ + correct_to_nearest_filename, correct_to_nearest_dir_path, shortify_paths, get_project_dirs, +}; use crate::global_context::GlobalContext; +pub async fn resolve_file_path_directly( + gcx: Arc>, + path_with_colon: &str, +) -> Option { + let mut path_str = path_with_colon.to_string(); + let colon_range = colon_lines_range_from_arg(&mut path_str); + + let path = PathBuf::from(&path_str); + + if path.is_absolute() { + if path.is_file() { + let mut result = path.to_string_lossy().to_string(); + put_colon_back_to_arg(&mut result, &colon_range); + return Some(result); + } + return None; + } + + let project_dirs = get_project_dirs(gcx.clone()).await; + let mut matches = Vec::new(); + + for pd in &project_dirs { + let full_path = pd.join(&path); + if full_path.is_file() { + matches.push(full_path); + } + } + + if matches.len() == 1 { + let mut result = matches[0].to_string_lossy().to_string(); + put_colon_back_to_arg(&mut result, &colon_range); + return Some(result); + } + + None +} pub struct AtFile { pub params: Vec>, @@ -19,9 +59,7 @@ pub struct AtFile { impl AtFile { pub fn new() -> Self { AtFile { - params: vec![ - Box::new(AtParamFilePath::new()) - ], + params: vec![Box::new(AtParamFilePath::new())], } } } @@ -58,25 +96,41 @@ pub fn colon_lines_range_from_arg(value: &mut String) -> Option (Some(line1), Some(line2)) => { let line1 = line1.as_str().parse::().unwrap_or(0); let line2 = line2.as_str().parse::().unwrap_or(0); - Some(ColonLinesRange { kind: RangeKind::Range, line1, line2 }) - }, + Some(ColonLinesRange { + kind: RangeKind::Range, + line1, + line2, + }) + } (Some(line1), None) => { let line1 = line1.as_str().parse::().unwrap_or(0); - Some(ColonLinesRange { kind: RangeKind::GradToCursorSuffix, line1, line2: 0 }) - }, + Some(ColonLinesRange { + kind: RangeKind::GradToCursorSuffix, + line1, + line2: 0, + }) + } (None, Some(line2)) => { let line2 = line2.as_str().parse::().unwrap_or(0); - Some(ColonLinesRange { kind: RangeKind::GradToCursorPrefix, line1: 0, line2 }) - }, + Some(ColonLinesRange { + kind: RangeKind::GradToCursorPrefix, + line1: 0, + line2, + }) + } _ => None, - } + }; } let re_one_number = Regex::new(r":(\d+)$").unwrap(); if let Some(captures) = re_one_number.captures(value.clone().as_str()) { *value = re_one_number.replace(value, "").to_string(); if let Some(line1) = captures.get(1) { let line = line1.as_str().parse::().unwrap_or(0); - return Some(ColonLinesRange { kind: RangeKind::GradToCursorTwoSided, line1: line, line2: 0 }); + return Some(ColonLinesRange { + kind: RangeKind::GradToCursorTwoSided, + line1: line, + line2: 0, + }); } } None @@ -106,23 +160,22 @@ pub async fn file_repair_candidates( gcx: Arc>, value: &String, top_n: usize, - fuzzy: bool + fuzzy: bool, ) -> Vec { let mut correction_candidate = value.clone(); let colon_mb = colon_lines_range_from_arg(&mut correction_candidate); - let result: Vec = correct_to_nearest_filename( - gcx.clone(), - &correction_candidate, - fuzzy, - top_n, - 
).await; - - result.iter().map(|x| { - let mut x = x.clone(); - put_colon_back_to_arg(&mut x, &colon_mb); - x - }).collect() + let result: Vec = + correct_to_nearest_filename(gcx.clone(), &correction_candidate, fuzzy, top_n).await; + + result + .iter() + .map(|x| { + let mut x = x.clone(); + put_colon_back_to_arg(&mut x, &colon_mb); + x + }) + .collect() } pub async fn return_one_candidate_or_a_good_error( @@ -131,50 +184,84 @@ pub async fn return_one_candidate_or_a_good_error( candidates: &Vec, project_paths: &Vec, dirs: bool, -) -> Result{ +) -> Result { let mut f_path = PathBuf::from(file_path); if candidates.is_empty() { let similar_paths_str = if dirs { - correct_to_nearest_dir_path(gcx.clone(), file_path, true, 10).await.join("\n") + correct_to_nearest_dir_path(gcx.clone(), file_path, true, 10) + .await + .join("\n") } else { - let name_only = f_path.file_name().ok_or(format!("unable to get file name from path: {:?}", f_path))?.to_string_lossy().to_string(); - let x = file_repair_candidates(gcx.clone(), &name_only, 10, true).await.iter().cloned().take(10).collect::>(); + let name_only = f_path + .file_name() + .ok_or(format!("unable to get file name from path: {:?}", f_path))? + .to_string_lossy() + .to_string(); + let x = file_repair_candidates(gcx.clone(), &name_only, 10, true) + .await + .iter() + .cloned() + .take(10) + .collect::>(); let shortified_file_names = shortify_paths(gcx.clone(), &x).await; shortified_file_names.join("\n") }; if f_path.is_absolute() { - if !project_paths.iter().any(|x|f_path.starts_with(x)) { - return Err(format!("Path {:?} is outside of project directories:\n{:?}", f_path, project_paths)); + if !project_paths.iter().any(|x| f_path.starts_with(x)) { + return Err(format!( + "Path {:?} is outside of project directories:\n{:?}", + f_path, project_paths + )); } return if similar_paths_str.is_empty() { - Err(format!("The path {:?} does not exist. There are no similar names either.", f_path)) + Err(format!( + "The path {:?} does not exist. There are no similar names either.", + f_path + )) } else { - Err(format!("The path {:?} does not exist. There are paths with similar names however:\n{}", f_path, similar_paths_str)) - } + Err(format!( + "The path {:?} does not exist. There are paths with similar names however:\n{}", + f_path, similar_paths_str + )) + }; } if f_path.is_relative() { - let projpath_options = project_paths.iter().map(|x| x.join(&f_path)) - .filter(|x| if dirs { x.is_dir() } else { x.is_file() }).collect::>(); + let projpath_options = project_paths + .iter() + .map(|x| x.join(&f_path)) + .filter(|x| if dirs { x.is_dir() } else { x.is_file() }) + .collect::>(); if projpath_options.len() > 1 { - let projpath_options_str = projpath_options.iter().map(|x|x.to_string_lossy().to_string()).collect::>().join("\n"); + let projpath_options_str = projpath_options + .iter() + .map(|x| x.to_string_lossy().to_string()) + .collect::>() + .join("\n"); return Err(format!("The path {:?} is ambiguous. Adding project path, it might be:\n{:?}\nAlso, there are similar filepaths:\n{}", f_path, projpath_options_str, similar_paths_str)); } return if projpath_options.is_empty() { if similar_paths_str.is_empty() { - Err(format!("The path {:?} does not exist. There are no similar names either.", f_path)) + Err(format!( + "The path {:?} does not exist. There are no similar names either.", + f_path + )) } else { Err(format!("The path {:?} does not exist. 
There are paths with similar names however:\n{}", f_path, similar_paths_str)) } } else { f_path = projpath_options[0].clone(); Ok(f_path.to_string_lossy().to_string()) - } + }; } } if candidates.len() > 1 { - return Err(format!("The path {:?} is ambiguous. It could be interpreted as:\n{}", file_path, candidates.join("\n"))); + return Err(format!( + "The path {:?} is ambiguous. It could be interpreted as:\n{}", + file_path, + candidates.join("\n") + )); } // XXX: sometimes it's relative path which looks OK but doesn't work @@ -185,7 +272,6 @@ pub async fn return_one_candidate_or_a_good_error( Ok(candidate) } - #[derive(Debug)] pub struct AtParamFilePath {} @@ -195,15 +281,21 @@ impl AtParamFilePath { } } - #[async_trait] impl AtParam for AtParamFilePath { - async fn is_value_valid( - &self, - _ccx: Arc>, - _value: &String, - ) -> bool { - return true; + async fn is_value_valid(&self, _ccx: Arc>, value: &String) -> bool { + if value.is_empty() { + return false; + } + let trimmed = value.trim(); + if trimmed.is_empty() || trimmed == ":" { + return false; + } + let re = Regex::new(r"^:(\d+)?-(\d+)?$").unwrap(); + if re.is_match(trimmed) { + return false; + } + true } async fn param_completion( @@ -223,9 +315,16 @@ impl AtParam for AtParamFilePath { let file_path = PathBuf::from(value); if file_path.is_relative() { let project_dirs = get_project_dirs(gcx.clone()).await; - let options = project_dirs.iter().map(|x|x.join(&file_path)).filter(|x|x.is_file()).collect::>(); + let options = project_dirs + .iter() + .map(|x| x.join(&file_path)) + .filter(|x| x.is_file()) + .collect::>(); if !options.is_empty() { - let res = options.iter().map(|x| x.to_string_lossy().to_string()).collect(); + let res = options + .iter() + .map(|x| x.to_string_lossy().to_string()) + .collect(); return shortify_paths(gcx.clone(), &res).await; } } @@ -233,10 +332,11 @@ impl AtParam for AtParamFilePath { shortify_paths(gcx.clone(), &res).await } - fn param_completion_valid(&self) -> bool {true} + fn param_completion_valid(&self) -> bool { + true + } } - pub async fn context_file_from_file_path( gcx: Arc>, file_path_hopefully_corrected: String, @@ -247,7 +347,8 @@ pub async fn context_file_from_file_path( let colon_kind_mb = colon_lines_range_from_arg(&mut file_path_no_colon); let gradient_type = gradient_type_from_range_kind(&colon_kind_mb); - let file_content = get_file_text_from_memory_or_disk(gcx.clone(), &PathBuf::from(&file_path_no_colon)).await?; + let file_content = + get_file_text_from_memory_or_disk(gcx.clone(), &PathBuf::from(&file_path_no_colon)).await?; let file_line_count = file_content.lines().count().max(1); if let Some(colon) = &colon_kind_mb { @@ -255,17 +356,28 @@ pub async fn context_file_from_file_path( line2 = colon.line2; } - // Validate line numbers - if they exceed file length, reset to whole file - if line1 > file_line_count || line2 > file_line_count { - tracing::warn!( - "Line numbers ({}, {}) exceed file length {} for {:?}, resetting to whole file", - line1, line2, file_line_count, file_path_no_colon - ); + if line1 == 0 && line2 == 0 { line1 = 1; line2 = file_line_count; - } else if line1 == 0 && line2 == 0 { + } else if line1 == 0 && line2 > 0 { line1 = 1; + line2 = line2.min(file_line_count); + } else if line1 > 0 && line2 == 0 { + line1 = line1.min(file_line_count); line2 = file_line_count; + } else if line1 > file_line_count || line2 > file_line_count { + tracing::warn!( + "Line numbers ({}, {}) exceed file length {} for {:?}, clamping", + line1, + line2, + file_line_count, + 
file_path_no_colon + ); + line1 = line1.min(file_line_count).max(1); + line2 = line2.min(file_line_count).max(1); + } + if line1 > line2 { + std::mem::swap(&mut line1, &mut line2); } Ok(ContextFile { @@ -277,10 +389,10 @@ pub async fn context_file_from_file_path( gradient_type, usefulness: 100.0, skip_pp: false, + file_rev: None, }) } - #[async_trait] impl AtCommand for AtFile { fn params(&self) -> &Vec> { @@ -293,36 +405,71 @@ impl AtCommand for AtFile { cmd: &mut AtCommandMember, args: &mut Vec, ) -> Result<(Vec, String), String> { - let mut arg0 = match args.iter().filter(|x|!x.text.trim().is_empty()).next() { + let (gcx, top_n, is_preview) = { + let ccx_lock = ccx.lock().await; + ( + ccx_lock.global_context.clone(), + ccx_lock.top_n, + ccx_lock.is_preview, + ) + }; + + let mut arg0 = match args.iter().find(|x| !x.text.trim().is_empty()) { Some(x) => x.clone(), None => { - cmd.ok = false; cmd.reason = Some("no file provided".to_string()); + cmd.ok = false; + cmd.reason = Some("no file provided".to_string()); args.clear(); - if ccx.lock().await.is_preview { + if is_preview { return Ok((vec![], "".to_string())); } return Err("Cannot execute @file: no file provided".to_string()); } }; - correct_at_arg(ccx.clone(), &self.params[0], &mut arg0).await; + args.clear(); args.push(arg0.clone()); - if !arg0.ok { - return Err(format!("arg0 is incorrect: {:?}. Reason: {:?}", arg0.text, arg0.reason)); + if let Some(resolved) = resolve_file_path_directly(gcx.clone(), &arg0.text).await { + arg0.text = resolved.clone(); + arg0.ok = true; + args[0] = arg0.clone(); + + match context_file_from_file_path(gcx.clone(), resolved).await { + Ok(context_file) => { + let replacement_text = if cmd.pos1 == 0 { + "".to_string() + } else { + arg0.text.clone() + }; + return Ok((vec_context_file_to_context_tools(vec![context_file]), replacement_text)); + } + Err(e) => { + if is_preview { + cmd.ok = false; + cmd.reason = Some(e); + return Ok((vec![], "".to_string())); + } + return Err(e); + } + } } - let (gcx, top_n) = { - let ccx_lock = ccx.lock().await; - (ccx_lock.global_context.clone(), ccx_lock.top_n) - }; - - // This is just best-behavior, since user has already submitted their request + correct_at_arg(ccx.clone(), &self.params[0], &mut arg0).await; + args[0] = arg0.clone(); - // TODO: use project paths as candidates, check file on disk + if !arg0.ok { + if is_preview { + cmd.ok = false; + cmd.reason = arg0.reason.clone(); + return Ok((vec![], "".to_string())); + } + return Err(format!("arg0 is incorrect: {:?}. 
Reason: {:?}", arg0.text, arg0.reason)); + } let candidates = { - let candidates_fuzzy0 = file_repair_candidates(gcx.clone(), &arg0.text, top_n, false).await; + let candidates_fuzzy0 = + file_repair_candidates(gcx.clone(), &arg0.text, top_n, false).await; if !candidates_fuzzy0.is_empty() { candidates_fuzzy0 } else { @@ -330,14 +477,37 @@ impl AtCommand for AtFile { } }; - if candidates.len() == 0 { + if candidates.is_empty() { + if is_preview { + cmd.ok = false; + cmd.reason = Some(format!("cannot find {:?}", arg0.text)); + return Ok((vec![], "".to_string())); + } return Err(format!("cannot find {:?}", arg0.text)); } - let context_file = context_file_from_file_path(gcx.clone(), candidates[0].clone()).await?; - let replacement_text = if cmd.pos1 == 0 { "".to_string() } else { arg0.text.clone() }; + let context_file = match context_file_from_file_path(gcx.clone(), candidates[0].clone()).await { + Ok(cf) => cf, + Err(e) => { + if is_preview { + cmd.ok = false; + cmd.reason = Some(e); + return Ok((vec![], "".to_string())); + } + return Err(e); + } + }; + + let replacement_text = if cmd.pos1 == 0 { + "".to_string() + } else { + arg0.text.clone() + }; - Ok((vec_context_file_to_context_tools(vec![context_file]), replacement_text)) + Ok(( + vec_context_file_to_context_tools(vec![context_file]), + replacement_text, + )) } } @@ -350,22 +520,50 @@ mod tests { { let mut value = String::from(":10-20"); let result = colon_lines_range_from_arg(&mut value); - assert_eq!(result, Some(ColonLinesRange { kind: RangeKind::Range, line1: 10, line2: 20 })); + assert_eq!( + result, + Some(ColonLinesRange { + kind: RangeKind::Range, + line1: 10, + line2: 20 + }) + ); } { let mut value = String::from(":5-"); let result = colon_lines_range_from_arg(&mut value); - assert_eq!(result, Some(ColonLinesRange { kind: RangeKind::GradToCursorSuffix, line1: 5, line2: 0 })); + assert_eq!( + result, + Some(ColonLinesRange { + kind: RangeKind::GradToCursorSuffix, + line1: 5, + line2: 0 + }) + ); } { let mut value = String::from(":-15"); let result = colon_lines_range_from_arg(&mut value); - assert_eq!(result, Some(ColonLinesRange { kind: RangeKind::GradToCursorPrefix, line1: 0, line2: 15 })); + assert_eq!( + result, + Some(ColonLinesRange { + kind: RangeKind::GradToCursorPrefix, + line1: 0, + line2: 15 + }) + ); } { let mut value = String::from(":25"); let result = colon_lines_range_from_arg(&mut value); - assert_eq!(result, Some(ColonLinesRange { kind: RangeKind::GradToCursorTwoSided, line1: 25, line2: 0 })); + assert_eq!( + result, + Some(ColonLinesRange { + kind: RangeKind::GradToCursorTwoSided, + line1: 25, + line2: 0 + }) + ); } { let mut value = String::from("invalid"); diff --git a/refact-agent/engine/src/at_commands/at_knowledge.rs b/refact-agent/engine/src/at_commands/at_knowledge.rs index f7001f844..4e81d2ac8 100644 --- a/refact-agent/engine/src/at_commands/at_knowledge.rs +++ b/refact-agent/engine/src/at_commands/at_knowledge.rs @@ -38,28 +38,32 @@ impl AtCommand for AtLoadKnowledge { let search_key = args.iter().map(|x| x.text.clone()).join(" "); let gcx = ccx.lock().await.global_context.clone(); - let memories = memories_search(gcx, &search_key, 5).await?; + let memories = memories_search(gcx, &search_key, 5, 0, None).await?; let mut seen_memids = HashSet::new(); - let unique_memories: Vec<_> = memories.into_iter() + let unique_memories: Vec<_> = memories + .into_iter() .filter(|m| seen_memids.insert(m.memid.clone())) .collect(); - let results = unique_memories.iter().map(|m| { - let mut result = String::new(); - if 
let Some(path) = &m.file_path { - result.push_str(&format!("📄 {}", path.display())); - if let Some((start, end)) = m.line_range { - result.push_str(&format!(":{}-{}", start, end)); + let results = unique_memories + .iter() + .map(|m| { + let mut result = String::new(); + if let Some(path) = &m.file_path { + result.push_str(&format!("📄 {}", path.display())); + if let Some((start, end)) = m.line_range { + result.push_str(&format!(":{}-{}", start, end)); + } + result.push('\n'); } - result.push('\n'); - } - if let Some(title) = &m.title { - result.push_str(&format!("📌 {}\n", title)); - } - result.push_str(&m.content); - result.push_str("\n\n"); - result - }).collect::(); + if let Some(title) = &m.title { + result.push_str(&format!("📌 {}\n", title)); + } + result.push_str(&m.content); + result.push_str("\n\n"); + result + }) + .collect::(); let context = ContextEnum::ChatMessage(ChatMessage::new("plain_text".to_string(), results)); Ok((vec![context], "".to_string())) diff --git a/refact-agent/engine/src/at_commands/at_search.rs b/refact-agent/engine/src/at_commands/at_search.rs index 8461477a6..b9bf56a89 100644 --- a/refact-agent/engine/src/at_commands/at_search.rs +++ b/refact-agent/engine/src/at_commands/at_search.rs @@ -1,4 +1,6 @@ -use crate::at_commands::at_commands::{vec_context_file_to_context_tools, AtCommand, AtCommandsContext, AtParam}; +use crate::at_commands::at_commands::{ + vec_context_file_to_context_tools, AtCommand, AtCommandsContext, AtParam, +}; use async_trait::async_trait; use std::sync::Arc; use tokio::sync::Mutex as AMutex; @@ -10,7 +12,6 @@ use crate::call_validation::{ContextEnum, ContextFile}; use crate::vecdb; use crate::vecdb::vdb_structs::VecdbSearch; - pub fn text_on_clip(query: &String, from_tool_call: bool) -> String { if !from_tool_call { return query.clone(); @@ -18,16 +19,13 @@ pub fn text_on_clip(query: &String, from_tool_call: bool) -> String { return format!("performed vecdb search, results below"); } - pub struct AtSearch { pub params: Vec>, } impl AtSearch { pub fn new() -> Self { - AtSearch { - params: vec![], - } + AtSearch { params: vec![] } } } @@ -37,10 +35,15 @@ fn results2message(results: &Vec) -> Vec) -> Vec, ) -> Result<(Vec, String), String> { - let args1 = args.iter().map(|x|x.clone()).collect::>(); - info!("execute @search {:?}", args1.iter().map(|x|x.text.clone()).collect::>()); + let args1 = args.iter().map(|x| x.clone()).collect::>(); + info!( + "execute @search {:?}", + args1.iter().map(|x| x.text.clone()).collect::>() + ); - let query = args.iter().map(|x|x.text.clone()).collect::>().join(" "); + let query = args + .iter() + .map(|x| x.text.clone()) + .collect::>() + .join(" "); if query.trim().is_empty() { if ccx.lock().await.is_preview { return Ok((vec![], "".to_string())); @@ -108,7 +119,10 @@ impl AtCommand for AtSearch { let vector_of_context_file = execute_at_search(ccx.clone(), &query, None).await?; let text = text_on_clip(&query, false); - Ok((vec_context_file_to_context_tools(vector_of_context_file), text)) + Ok(( + vec_context_file_to_context_tools(vector_of_context_file), + text, + )) } fn depends_on(&self) -> Vec { diff --git a/refact-agent/engine/src/at_commands/at_tree.rs b/refact-agent/engine/src/at_commands/at_tree.rs index 5afe782dc..20a29f20b 100644 --- a/refact-agent/engine/src/at_commands/at_tree.rs +++ b/refact-agent/engine/src/at_commands/at_tree.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use std::path::PathBuf; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; +use std::fs; use async_trait::async_trait; 
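
Reviewer note on the @search changes above: the handler now builds the query, delegates to execute_at_search, and wraps the results for the chat stream. Below is a minimal sketch of reusing that pipeline from another handler; it assumes execute_at_search is public with the (ccx, query, scope) call shape used above and returns Result<Vec<ContextFile>, String>, and the helper name search_as_context is hypothetical, not part of this diff.

```rust
// Sketch only: reuse the @search pipeline from another at-command or tool.
// Assumes execute_at_search(ccx, &query, None) -> Result<Vec<ContextFile>, String>
// as called in at_search.rs above; `search_as_context` is a hypothetical helper.
use std::sync::Arc;
use tokio::sync::Mutex as AMutex;

use crate::at_commands::at_commands::{vec_context_file_to_context_tools, AtCommandsContext};
use crate::at_commands::at_search::{execute_at_search, text_on_clip};
use crate::call_validation::ContextEnum;

pub async fn search_as_context(
    ccx: Arc<AMutex<AtCommandsContext>>,
    query: &String,
) -> Result<(Vec<ContextEnum>, String), String> {
    // None: no explicit scope filter, same as the @search handler above (assumption).
    let context_files = execute_at_search(ccx.clone(), query, None).await?;
    // true: phrased as a tool-call result rather than an inline @search mention.
    let clip = text_on_clip(query, true);
    Ok((vec_context_file_to_context_tools(context_files), clip))
}
```
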
use tokio::sync::Mutex as AMutex; @@ -13,6 +14,26 @@ use crate::at_commands::execute_at::AtCommandMember; use crate::call_validation::{ChatMessage, ContextEnum}; use crate::files_correction::{correct_to_nearest_dir_path, get_project_dirs, paths_from_anywhere}; +const BINARY_EXTENSIONS: &[&str] = &[ + "png", "jpg", "jpeg", "gif", "bmp", "ico", "webp", "svg", "mp3", "mp4", "wav", "avi", "mov", + "mkv", "flv", "webm", "zip", "tar", "gz", "rar", "7z", "bz2", "xz", "exe", "dll", "so", + "dylib", "bin", "obj", "o", "a", "pdf", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "woff", + "woff2", "ttf", "otf", "eot", "pyc", "pyo", "class", "jar", "war", "db", "sqlite", "sqlite3", + "lock", "sum", +]; + +const SKIP_DIRS: &[&str] = &[ + "__pycache__", + "node_modules", + ".git", + ".svn", + ".hg", + "target", + "dist", + "build", + ".next", + ".nuxt", +]; pub struct AtTree { pub params: Vec>, @@ -20,115 +41,47 @@ pub struct AtTree { impl AtTree { pub fn new() -> Self { - AtTree { - params: vec![], - } - } -} - -#[derive(Debug, Clone)] -pub struct PathsHolderNodeArc(Arc>); - -impl PathsHolderNodeArc { - pub fn read(&self) -> std::sync::RwLockReadGuard<'_, PathsHolderNode> { - self.0.read().unwrap() - } -} - -impl PartialEq for PathsHolderNodeArc { - fn eq(&self, other: &Self) -> bool { - self.0.read().unwrap().path == other.0.read().unwrap().path - } -} - -#[derive(Debug, Clone, PartialEq)] -pub struct PathsHolderNode { - path: PathBuf, - is_dir: bool, - child_paths: Vec, - depth: usize, -} - -impl PathsHolderNode { - pub fn file_name(&self) -> String { - self.path.file_name().unwrap_or_default().to_string_lossy().to_string() - } - - pub fn child_paths(&self) -> &Vec { - &self.child_paths - } - - pub fn get_path(&self) -> &PathBuf { - &self.path - } -} - -pub fn construct_tree_out_of_flat_list_of_paths(paths_from_anywhere: &Vec) -> Vec { - let mut root_nodes: Vec = Vec::new(); - let mut nodes_map: HashMap = HashMap::new(); - - for path in paths_from_anywhere { - let components: Vec<_> = path.components().collect(); - let components_count = components.len(); - - let mut current_path = PathBuf::new(); - let mut parent_node: Option = None; - - for (index, component) in components.into_iter().enumerate() { - current_path.push(component); - - let is_last = index == components_count - 1; - let depth = index; - let node = nodes_map.entry(current_path.clone()).or_insert_with(|| { - PathsHolderNodeArc(Arc::new(RwLock::new( - PathsHolderNode { - path: current_path.clone(), - is_dir: !is_last, - child_paths: Vec::new(), - depth, - } - ))) - }); - - if node.0.read().unwrap().depth != depth { - node.0.write().unwrap().depth = depth; - } - - if let Some(parent) = parent_node { - if !parent.0.read().unwrap().child_paths.contains(node) { - parent.0.write().unwrap().child_paths.push(node.clone()); - } - } else { - if !root_nodes.contains(node) { - root_nodes.push(node.clone()); - } - } - - parent_node = Some(node.clone()); - } + AtTree { params: vec![] } } - root_nodes } pub struct TreeNode { pub children: HashMap, - // NOTE: we can store here more info like depth, sub files count, etc. 
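
The BINARY_EXTENSIONS and SKIP_DIRS constants introduced above feed the should_skip_path and is_binary_file helpers added later in this hunk. The expectations below are illustrative only (a test sketch, not part of the change); they rely solely on the helper names and PathBuf argument types defined in this file.

```rust
// Illustrative expectations for the filtering helpers in this hunk.
#[cfg(test)]
mod skip_rules_examples {
    use super::*;
    use std::path::PathBuf;

    #[test]
    fn skips_hidden_dirs_vendored_dirs_and_binaries() {
        // Any hidden component (leading dot) is skipped.
        assert!(should_skip_path(&PathBuf::from(".git/config")));
        // Components listed in SKIP_DIRS are skipped.
        assert!(should_skip_path(&PathBuf::from("node_modules/react/index.js")));
        // Binary extensions are matched case-insensitively.
        assert!(should_skip_path(&PathBuf::from("assets/logo.PNG")));
        // Ordinary source files pass through.
        assert!(!should_skip_path(&PathBuf::from("src/main.rs")));
    }
}
```
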
+ pub file_size: Option, + pub line_count: Option, } impl TreeNode { pub fn new() -> Self { TreeNode { children: HashMap::new(), + file_size: None, + line_count: None, } } pub fn build(paths: &Vec) -> Self { let mut root = TreeNode::new(); for path in paths { + if should_skip_path(path) { + continue; + } let mut node = &mut root; - for component in path.components() { + let components: Vec<_> = path.components().collect(); + let last_idx = components.len().saturating_sub(1); + + for (i, component) in components.iter().enumerate() { let key = component.as_os_str().to_string_lossy().to_string(); node = node.children.entry(key).or_insert_with(TreeNode::new); + + if i == last_idx { + if let Ok(meta) = fs::metadata(path) { + node.file_size = Some(meta.len()); + if !is_binary_file(path) { + node.line_count = count_lines(path); + } + } + } } } root @@ -139,128 +92,245 @@ impl TreeNode { } } -fn _print_symbols(db: Arc, path: &PathBuf) -> String { +fn should_skip_path(path: &PathBuf) -> bool { + for component in path.components() { + let name = component.as_os_str().to_string_lossy(); + if name.starts_with('.') || SKIP_DIRS.contains(&name.as_ref()) { + return true; + } + } + is_binary_file(path) +} + +fn is_binary_file(path: &PathBuf) -> bool { + path.extension() + .and_then(|e| e.to_str()) + .map(|e| BINARY_EXTENSIONS.contains(&e.to_lowercase().as_str())) + .unwrap_or(false) +} + +fn count_lines(path: &PathBuf) -> Option { + fs::read_to_string(path).ok().map(|c| c.lines().count()) +} + +fn format_size(bytes: u64) -> String { + if bytes < 1024 { + format!("{}B", bytes) + } else if bytes < 1024 * 1024 { + format!("{:.1}K", bytes as f64 / 1024.0) + } else { + format!("{:.1}M", bytes as f64 / (1024.0 * 1024.0)) + } +} + +fn print_symbols(db: Arc, path: &PathBuf) -> String { let cpath = path.to_string_lossy().to_string(); let defs = crate::ast::ast_db::doc_defs(db.clone(), &cpath); - let symbols_list = defs + let symbols: Vec = defs .iter() - .filter(|x| match x.symbol_type { - SymbolType::StructDeclaration | SymbolType::TypeAlias | SymbolType::FunctionDeclaration => true, - _ => false + .filter(|x| { + matches!( + x.symbol_type, + SymbolType::StructDeclaration + | SymbolType::TypeAlias + | SymbolType::FunctionDeclaration + ) }) .map(|x| x.name()) - .collect::>() - .join(", "); - if !symbols_list.is_empty() { format!(" ({symbols_list})") } else { "".to_string() } + .collect(); + if symbols.is_empty() { + String::new() + } else { + format!(" ({})", symbols.join(", ")) + } } -async fn _print_files_tree( +fn print_files_tree( tree: &TreeNode, ast_db: Option>, maxdepth: usize, + max_files: usize, + is_root_query: bool, ) -> String { fn traverse( node: &TreeNode, path: PathBuf, depth: usize, maxdepth: usize, + max_files: usize, + is_root_level: bool, ast_db: Option>, ) -> Option { if depth > maxdepth { return None; } - let mut output = String::new(); + let indent = " ".repeat(depth); - let name = path.file_name().unwrap_or_default().to_string_lossy().to_string(); + let name = path + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(); + if !node.is_dir() { + let mut info = String::new(); + if let Some(size) = node.file_size { + info.push_str(&format!(" [{}]", format_size(size))); + } + if let Some(lines) = node.line_count { + info.push_str(&format!(" {}L", lines)); + } if let Some(db) = ast_db.clone() { - output.push_str(&format!("{}{}{}\n", indent, name, _print_symbols(db, &path))); - } else { - output.push_str(&format!("{}{}\n", indent, name)); + info.push_str(&print_symbols(db, 
&path)); } - return Some(output); - } else { - output.push_str(&format!("{}{}/\n", indent, name)); + return Some(format!("{}{}{}\n", indent, name, info)); } - let (mut dirs, mut files) = (0, 0); - let mut child_output = String::new(); - for (name, child) in &node.children { + let mut output = format!("{}{}/\n", indent, name); + let mut sorted_children: Vec<_> = node.children.iter().collect(); + sorted_children.sort_by(|a, b| { + let a_is_dir = a.1.is_dir(); + let b_is_dir = b.1.is_dir(); + b_is_dir.cmp(&a_is_dir).then(a.0.cmp(b.0)) + }); + + let total_files = sorted_children.iter().filter(|(_, c)| !c.is_dir()).count(); + + let should_truncate = !is_root_level && total_files > max_files; + let mut files_shown = 0; + let mut hidden_files = 0; + let mut hidden_dirs = 0; + + for (child_name, child) in &sorted_children { let mut child_path = path.clone(); - child_path.push(name); - if let Some(child_str) = traverse(child, child_path, depth + 1, maxdepth, ast_db.clone()) { - child_output.push_str(&child_str); + child_path.push(child_name); + + if !child.is_dir() && should_truncate && files_shown >= max_files { + hidden_files += 1; + continue; + } + + if let Some(child_str) = traverse( + child, + child_path, + depth + 1, + maxdepth, + max_files, + false, + ast_db.clone(), + ) { + output.push_str(&child_str); + if !child.is_dir() { + files_shown += 1; + } } else { - dirs += child.is_dir() as usize; - files += !child.is_dir() as usize; + if child.is_dir() { + hidden_dirs += 1; + } else { + hidden_files += 1; + } } } - if dirs > 0 || files > 0 { - let summary = format!("{} ...{} subdirs, {} files...\n", indent, dirs, files); - child_output.push_str(&summary); + if hidden_dirs > 0 || hidden_files > 0 { + output.push_str(&format!( + "{} ...+{} dirs, +{} files\n", + indent, hidden_dirs, hidden_files + )); } - output.push_str(&child_output); Some(output) } let mut result = String::new(); - for (name, node) in &tree.children { - if let Some(output) = traverse(node, PathBuf::from(name), 0, maxdepth, ast_db.clone()) { + let mut sorted_roots: Vec<_> = tree.children.iter().collect(); + sorted_roots.sort_by(|a, b| { + let a_is_dir = a.1.is_dir(); + let b_is_dir = b.1.is_dir(); + b_is_dir.cmp(&a_is_dir).then(a.0.cmp(b.0)) + }); + for (name, node) in sorted_roots { + if let Some(output) = traverse( + node, + PathBuf::from(name), + 0, + maxdepth, + max_files, + is_root_query, + ast_db.clone(), + ) { result.push_str(&output); - } else { - break; } } result } -async fn _print_files_tree_with_budget( +fn print_files_tree_with_budget( tree: &TreeNode, char_limit: usize, ast_db: Option>, + max_files: usize, + is_root_query: bool, ) -> String { - let mut good_enough = String::new(); - for maxdepth in 1..20 { - let bigger_tree_str = _print_files_tree(&tree, ast_db.clone(), maxdepth).await; - if bigger_tree_str.len() > char_limit { + let depth1_output = print_files_tree(tree, ast_db.clone(), 1, max_files, is_root_query); + if depth1_output.len() > char_limit { + let truncated: String = depth1_output.chars().take(char_limit.saturating_sub(20)).collect(); + return format!("{}...[truncated]", truncated); + } + let mut good_enough = depth1_output; + for maxdepth in 2..20 { + let bigger = print_files_tree(tree, ast_db.clone(), maxdepth, max_files, is_root_query); + if bigger.len() > char_limit { break; } - good_enough = bigger_tree_str; + good_enough = bigger; } good_enough } -pub async fn print_files_tree_with_budget( +pub async fn tree_for_tools( ccx: Arc>, tree: &TreeNode, use_ast: bool, + max_files: usize, + 
is_root_query: bool, ) -> Result { let (gcx, tokens_for_rag) = { let ccx_locked = ccx.lock().await; (ccx_locked.global_context.clone(), ccx_locked.tokens_for_rag) }; - tracing::info!("tree() tokens_for_rag={}", tokens_for_rag); - const SYMBOLS_PER_TOKEN: f32 = 3.5; - let char_limit = tokens_for_rag * SYMBOLS_PER_TOKEN as usize; - let mut ast_module_option = gcx.read().await.ast_service.clone(); - if !use_ast { - ast_module_option = None; - } - match ast_module_option { - Some(ast_module) => { - crate::ast::ast_indexer_thread::ast_indexer_block_until_finished(ast_module.clone(), 20_000, true).await; - let ast_db: Option> = Some(ast_module.lock().await.ast_index.clone()); - Ok(_print_files_tree_with_budget(tree, char_limit, ast_db.clone()).await) + const CHARS_PER_TOKEN: f32 = 3.5; + let char_limit = ((tokens_for_rag as f32) * CHARS_PER_TOKEN) as usize; + + let ast_db = if use_ast { + if let Some(ast_module) = gcx.read().await.ast_service.clone() { + crate::ast::ast_indexer_thread::ast_indexer_block_until_finished( + ast_module.clone(), + 20_000, + true, + ) + .await; + Some(ast_module.lock().await.ast_index.clone()) + } else { + None } - None => Ok(_print_files_tree_with_budget(tree, char_limit, None).await), - } -} + } else { + None + }; + Ok(print_files_tree_with_budget( + tree, + char_limit, + ast_db, + max_files, + is_root_query, + )) +} #[async_trait] impl AtCommand for AtTree { - fn params(&self) -> &Vec> { &self.params } + fn params(&self) -> &Vec> { + &self.params + } async fn at_execute( &self, @@ -270,49 +340,67 @@ impl AtCommand for AtTree { ) -> Result<(Vec, String), String> { let gcx = ccx.lock().await.global_context.clone(); let paths_from_anywhere = paths_from_anywhere(gcx.clone()).await; - let paths_from_anywhere_len = paths_from_anywhere.len(); - let project_dirs = get_project_dirs(gcx.clone()).await; - let filtered_paths: Vec = paths_from_anywhere.into_iter() - .filter(|path| project_dirs.iter().any(|project_dir| path.starts_with(project_dir))) + let filtered_paths: Vec = paths_from_anywhere + .into_iter() + .filter(|path| project_dirs.iter().any(|pd| path.starts_with(pd))) .collect(); - tracing::info!("tree: project_dirs={:?} file paths {} filtered project dirs only => {} paths", project_dirs, paths_from_anywhere_len, filtered_paths.len()); - *args = args.iter().take_while(|arg| arg.text != "\n" || arg.text == "--ast").take(2).cloned().collect(); + *args = args + .iter() + .take_while(|arg| arg.text != "\n" || arg.text == "--ast") + .take(2) + .cloned() + .collect(); - let tree = match args.iter().find(|x| x.text != "--ast") { - None => TreeNode::build(&filtered_paths), + let (tree, is_root_query) = match args.iter().find(|x| x.text != "--ast") { + None => (TreeNode::build(&filtered_paths), true), Some(arg) => { let path = arg.text.clone(); let candidates = correct_to_nearest_dir_path(gcx.clone(), &path, false, 10).await; - let candidate = return_one_candidate_or_a_good_error(gcx.clone(), &path, &candidates, &project_dirs, true).await.map_err(|e| { + let candidate = return_one_candidate_or_a_good_error( + gcx.clone(), + &path, + &candidates, + &project_dirs, + true, + ) + .await + .map_err(|e| { cmd.ok = false; cmd.reason = Some(e.clone()); args.clear(); e })?; let start_dir = PathBuf::from(candidate); - let paths_start_with_start_dir = filtered_paths.iter() - .filter(|f|f.starts_with(&start_dir)).cloned().collect::>(); - TreeNode::build(&paths_start_with_start_dir) + let paths = filtered_paths + .iter() + .filter(|f| f.starts_with(&start_dir)) + .cloned() + .collect(); + 
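
The rewritten tree printer sorts directories before files, truncates long per-directory file lists with a "...+N dirs, +M files" marker, and annotates each file with a size label from format_size plus a line count. A few worked values for format_size, shown as an illustrative test rather than part of the diff:

```rust
// Illustrative values for the size labels used in the tree output.
#[cfg(test)]
mod format_size_examples {
    use super::*;

    #[test]
    fn human_readable_sizes() {
        assert_eq!(format_size(512), "512B");              // below 1 KiB: raw bytes
        assert_eq!(format_size(1536), "1.5K");             // KiB range, one decimal
        assert_eq!(format_size(5 * 1024 * 1024), "5.0M");  // MiB range, one decimal
    }
}
```
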
(TreeNode::build(&paths), false) } }; let use_ast = args.iter().any(|x| x.text == "--ast"); - let tree = print_files_tree_with_budget(ccx.clone(), &tree, use_ast).await.map_err(|err| { - warn!("{}", err); - err - })?; + let tree = tree_for_tools(ccx.clone(), &tree, use_ast, 10, is_root_query) + .await + .map_err(|err| { + warn!("{}", err); + err + })?; + let tree = if tree.is_empty() { "tree(): directory is empty".to_string() } else { tree }; - - let context = ContextEnum::ChatMessage(ChatMessage::new( - "plain_text".to_string(), - tree, - )); - Ok((vec![context], "".to_string())) + Ok(( + vec![ContextEnum::ChatMessage(ChatMessage::new( + "plain_text".to_string(), + tree, + ))], + "".to_string(), + )) } } diff --git a/refact-agent/engine/src/at_commands/at_web.rs b/refact-agent/engine/src/at_commands/at_web.rs index 2d1b9fd78..052f80da3 100644 --- a/refact-agent/engine/src/at_commands/at_web.rs +++ b/refact-agent/engine/src/at_commands/at_web.rs @@ -14,16 +14,13 @@ use crate::at_commands::at_commands::{AtCommand, AtCommandsContext, AtParam}; use crate::at_commands::execute_at::AtCommandMember; use crate::call_validation::{ChatMessage, ContextEnum}; - pub struct AtWeb { pub params: Vec>, } impl AtWeb { pub fn new() -> Self { - AtWeb { - params: vec![], - } + AtWeb { params: vec![] } } } @@ -42,7 +39,8 @@ impl AtCommand for AtWeb { let url = match args.get(0) { Some(x) => x.clone(), None => { - cmd.ok = false; cmd.reason = Some("missing URL".to_string()); + cmd.ok = false; + cmd.reason = Some("missing URL".to_string()); args.clear(); return Err("missing URL".to_string()); } @@ -54,25 +52,32 @@ impl AtCommand for AtWeb { let gcx_read = gcx.read().await; gcx_read.at_commands_preview_cache.clone() }; - let text_from_cache = preview_cache.lock().await.get(&format!("@web:{}", url.text)); + let text_from_cache = preview_cache + .lock() + .await + .get(&format!("@web:{}", url.text)); let text = match text_from_cache { Some(text) => text, None => { - let text = execute_at_web(&url.text, None).await + let text = execute_at_web(&url.text, None) + .await .map_err(|e| format!("Failed to execute @web {}.\nError: {e}", url.text))?; - preview_cache.lock().await.insert(format!("@web:{}", url.text), text.clone()); + preview_cache + .lock() + .await + .insert(format!("@web:{}", url.text), text.clone()); text } }; - let message = ChatMessage::new( - "plain_text".to_string(), - text, - ); + let message = ChatMessage::new("plain_text".to_string(), text); info!("executed @web {}", url.text); - Ok((vec![ContextEnum::ChatMessage(message)], format!("[see text downloaded from {} above]", url.text))) + Ok(( + vec![ContextEnum::ChatMessage(message)], + format!("[see text downloaded from {} above]", url.text), + )) } fn depends_on(&self) -> Vec { @@ -84,14 +89,20 @@ const JINA_READER_BASE_URL: &str = "https://r.jina.ai/"; const JINA_TIMEOUT_SECS: u64 = 60; const FALLBACK_TIMEOUT_SECS: u64 = 10; -pub async fn execute_at_web(url: &str, options: Option<&HashMap>) -> Result { +pub async fn execute_at_web( + url: &str, + options: Option<&HashMap>, +) -> Result { match fetch_with_jina_reader(url, options).await { Ok(text) => { info!("successfully fetched {} via Jina Reader", url); Ok(text) } Err(jina_err) => { - warn!("Jina Reader failed for {}: {}, falling back to simple fetch", url, jina_err); + warn!( + "Jina Reader failed for {}: {}, falling back to simple fetch", + url, jina_err + ); match fetch_simple(url).await { Ok(text) => { info!("successfully fetched {} via simple fetch (fallback)", url); @@ -105,14 +116,19 @@ pub 
async fn execute_at_web(url: &str, options: Option<&HashMap>) } } -async fn fetch_with_jina_reader(url: &str, options: Option<&HashMap>) -> Result { +async fn fetch_with_jina_reader( + url: &str, + options: Option<&HashMap>, +) -> Result { let client = Client::builder() .timeout(Duration::from_secs(JINA_TIMEOUT_SECS)) .build() .map_err(|e| e.to_string())?; let jina_url = format!("{}{}", JINA_READER_BASE_URL, url); - let mut request = client.get(&jina_url).header("User-Agent", "RefactAgent/1.0"); + let mut request = client + .get(&jina_url) + .header("User-Agent", "RefactAgent/1.0"); let mut is_streaming = false; @@ -157,7 +173,10 @@ async fn fetch_with_jina_reader(url: &str, options: Option<&HashMap) -> Vec> { - vec![] + fn finalise(&mut self, lines: Vec) -> Vec> { + lines.into_iter().map(|line| TaggedLine::from_string(line, &())).collect() } } @@ -303,19 +322,29 @@ async fn fetch_html(url: &str, timeout: Duration) -> Result { .build() .map_err(|e| e.to_string())?; - let response = client.get(url) + let response = client + .get(url) .header("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64)") - .header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8") + .header( + "Accept", + "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", + ) .header("Accept-Language", "en-US,en;q=0.5") .header("Connection", "keep-alive") .header("Upgrade-Insecure-Requests", "1") .header("Cache-Control", "max-age=0") .header("DNT", "1") .header("Referer", "https://www.google.com/") - .send().await.map_err(|e| e.to_string())?; + .send() + .await + .map_err(|e| e.to_string())?; if !response.status().is_success() { - return Err(format!("unable to fetch url: {}; status: {}", url, response.status())); + return Err(format!( + "unable to fetch url: {}; status: {}", + url, + response.status() + )); } let body = response.text().await.map_err(|e| e.to_string())?; Ok(body) @@ -332,7 +361,6 @@ async fn fetch_simple(url: &str) -> Result { Ok(text) } - #[cfg(test)] mod tests { use tracing::warn; @@ -342,7 +370,11 @@ mod tests { async fn test_execute_at_web_jina() { let url = "https://doc.rust-lang.org/book/ch03-04-comments.html"; match execute_at_web(url, None).await { - Ok(text) => info!("test executed successfully (length: {} chars):\n\n{}", text.len(), &text[..text.len().min(500)]), + Ok(text) => info!( + "test executed successfully (length: {} chars):\n\n{}", + text.len(), + &text[..text.len().min(500)] + ), Err(e) => warn!("test failed with error: {e}"), } } @@ -351,7 +383,10 @@ mod tests { async fn test_jina_pdf_reading() { let url = "https://www.w3.org/WAI/WCAG21/Techniques/pdf/PDF1.pdf"; match execute_at_web(url, None).await { - Ok(text) => info!("PDF test executed successfully (length: {} chars)", text.len()), + Ok(text) => info!( + "PDF test executed successfully (length: {} chars)", + text.len() + ), Err(e) => warn!("PDF test failed with error: {e}"), } } @@ -360,9 +395,15 @@ mod tests { async fn test_jina_with_options() { let url = "https://doc.rust-lang.org/book/ch03-04-comments.html"; let mut options = HashMap::new(); - options.insert("target_selector".to_string(), Value::String("main".to_string())); + options.insert( + "target_selector".to_string(), + Value::String("main".to_string()), + ); match execute_at_web(url, Some(&options)).await { - Ok(text) => info!("options test executed successfully (length: {} chars)", text.len()), + Ok(text) => info!( + "options test executed successfully (length: {} chars)", + text.len() + ), Err(e) => warn!("options 
test failed with error: {e}"), } } diff --git a/refact-agent/engine/src/at_commands/execute_at.rs b/refact-agent/engine/src/at_commands/execute_at.rs index 8f64eba4e..5a8aeffd8 100644 --- a/refact-agent/engine/src/at_commands/execute_at.rs +++ b/refact-agent/engine/src/at_commands/execute_at.rs @@ -1,23 +1,20 @@ use std::sync::Arc; use tokio::sync::Mutex as AMutex; use regex::Regex; -use serde_json::{json, Value}; +use serde_json::json; use tokenizers::Tokenizer; use tracing::{info, warn}; -use crate::at_commands::at_commands::{AtCommandsContext, AtParam, filter_only_context_file_from_context_tool}; +use crate::at_commands::at_commands::{ + AtCommandsContext, AtParam, filter_only_context_file_from_context_tool, +}; use crate::call_validation::{ChatContent, ChatMessage, ContextEnum}; -use crate::http::http_post_json; -use crate::http::routers::v1::at_commands::{CommandExecutePost, CommandExecuteResponse}; -use crate::integrations::docker::docker_container_manager::docker_container_get_host_lsp_port_to_connect; use crate::postprocessing::pp_context_files::postprocess_context_files; use crate::postprocessing::pp_plain_text::postprocess_plain_text; use crate::scratchpads::scratchpad_utils::{HasRagResults, max_tokens_for_rag_chat}; - pub const MIN_RAG_CONTEXT_LIMIT: usize = 256; - pub async fn run_at_commands_locally( ccx: Arc>, tokenizer: Option>, @@ -27,7 +24,12 @@ pub async fn run_at_commands_locally( ) -> (Vec, bool) { let (n_ctx, top_n, is_preview, gcx) = { let ccx_locked = ccx.lock().await; - (ccx_locked.n_ctx, ccx_locked.top_n, ccx_locked.is_preview, ccx_locked.global_context.clone()) + ( + ccx_locked.n_ctx, + ccx_locked.top_n, + ccx_locked.is_preview, + ccx_locked.global_context.clone(), + ) }; if !is_preview { let preview_cache = gcx.read().await.at_commands_preview_cache.clone(); @@ -36,7 +38,7 @@ pub async fn run_at_commands_locally( let reserve_for_context = max_tokens_for_rag_chat(n_ctx, maxgen); info!("reserve_for_context {} tokens", reserve_for_context); - let any_context_produced = false; + let mut any_context_produced = false; let mut user_msg_starts = original_messages.len(); let mut messages_with_at: usize = 0; @@ -59,14 +61,31 @@ pub async fn run_at_commands_locally( let messages_after_user_msg = original_messages.split_off(user_msg_starts); let mut new_messages = original_messages; for (idx, mut msg) in messages_after_user_msg.into_iter().enumerate() { - // todo: make multimodal messages support @commands - if let ChatContent::Multimodal(_) = &msg.content { - stream_back_to_user.push_in_json(json!(msg)); - new_messages.push(msg); - continue; - } - let mut content = msg.content.content_text_only(); - let content_n_tokens = msg.content.count_tokens(tokenizer.clone(), &None).unwrap_or(0) as usize; + let (mut content, original_images) = if let ChatContent::Multimodal(parts) = &msg.content { + let text = parts + .iter() + .filter_map(|p| { + if p.m_type == "text" { + Some(p.m_content.as_str()) + } else { + None + } + }) + .collect::>() + .join("\n"); + let images = parts + .iter() + .filter(|p| p.m_type.starts_with("image/")) + .cloned() + .collect::>(); + (text, Some(images)) + } else { + (msg.content.content_text_only(), None) + }; + let content_n_tokens = msg + .content + .count_tokens(tokenizer.clone(), &None) + .unwrap_or(0) as usize; let mut context_limit = reserve_for_context / messages_with_at.max(1); context_limit = context_limit.saturating_sub(content_n_tokens); @@ -79,16 +98,13 @@ pub async fn run_at_commands_locally( messages_exec_output.extend(res); } - let mut 
context_file_pp = if context_limit > MIN_RAG_CONTEXT_LIMIT { - filter_only_context_file_from_context_tool(&messages_exec_output) - } else { - Vec::new() - }; + let mut context_file_pp = filter_only_context_file_from_context_tool(&messages_exec_output); let mut plain_text_messages = vec![]; for exec_result in messages_exec_output.into_iter() { // at commands exec() can produce role "user" "assistant" "diff" "plain_text" - if let ContextEnum::ChatMessage(raw_msg) = exec_result { // means not context_file + if let ContextEnum::ChatMessage(raw_msg) = exec_result { + // means not context_file if raw_msg.role != "plain_text" { stream_back_to_user.push_in_json(json!(raw_msg)); new_messages.push(raw_msg); @@ -98,17 +114,19 @@ pub async fn run_at_commands_locally( } } - // TODO: reduce context_limit by tokens(messages_exec_output) - - if context_limit > MIN_RAG_CONTEXT_LIMIT { + if !plain_text_messages.is_empty() || !context_file_pp.is_empty() { + let effective_context_limit = context_limit.max(MIN_RAG_CONTEXT_LIMIT); let (tokens_limit_plain, mut tokens_limit_files) = { if context_file_pp.is_empty() { - (context_limit, 0) + (effective_context_limit, 0) } else { - (context_limit / 2, context_limit / 2) + (effective_context_limit / 2, effective_context_limit / 2) } }; - info!("context_limit {} tokens_limit_plain {} tokens_limit_files: {}", context_limit, tokens_limit_plain, tokens_limit_files); + info!( + "context_limit {} tokens_limit_plain {} tokens_limit_files: {}", + context_limit, tokens_limit_plain, tokens_limit_files + ); let t0 = std::time::Instant::now(); @@ -117,7 +135,8 @@ pub async fn run_at_commands_locally( tokenizer.clone(), tokens_limit_plain, &None, - ).await; + ) + .await; for m in pp_plain_text { // OUTPUT: plain text after all custom messages stream_back_to_user.push_in_json(json!(m)); @@ -127,7 +146,11 @@ pub async fn run_at_commands_locally( info!("tokens_limit_files {}", tokens_limit_files); let (gcx, mut pp_settings, pp_skeleton) = { let ccx_locked = ccx.lock().await; - (ccx_locked.global_context.clone(), ccx_locked.postprocess_parameters.clone(), ccx_locked.pp_skeleton) + ( + ccx_locked.global_context.clone(), + ccx_locked.postprocess_parameters.clone(), + ccx_locked.pp_skeleton, + ) }; pp_settings.use_ast_based_pp = false; pp_settings.max_files_n = top_n; @@ -141,25 +164,39 @@ pub async fn run_at_commands_locally( tokens_limit_files, false, &pp_settings, - ).await; - if !post_processed.is_empty() { - // OUTPUT: files after all custom messages and plain text - let json_vec = post_processed.iter().map(|p| { json!(p)}).collect::>(); - if !json_vec.is_empty() { - let message = ChatMessage::new( - "context_file".to_string(), - serde_json::to_string(&json_vec).unwrap_or("".to_string()), - ); - stream_back_to_user.push_in_json(json!(message)); - new_messages.push(message); - } + ) + .await; + let (post_processed_files, _notes) = post_processed; + if !post_processed_files.is_empty() { + any_context_produced = true; + let message = ChatMessage { + role: "context_file".to_string(), + content: ChatContent::ContextFiles(post_processed_files), + ..Default::default() + }; + stream_back_to_user.push_in_json(json!(message)); + new_messages.push(message); } - info!("postprocess_plain_text_messages + postprocess_context_files {:.3}s", t0.elapsed().as_secs_f32()); + info!( + "postprocess_plain_text_messages + postprocess_context_files {:.3}s", + t0.elapsed().as_secs_f32() + ); } - if content.trim().len() > 0 { - // stream back to the user, with at-commands replaced - msg.content = 
ChatContent::SimpleText(content); + if content.trim().len() > 0 || original_images.is_some() { + msg.content = if let Some(mut images) = original_images { + let mut parts = vec![]; + if !content.trim().is_empty() { + parts.push(crate::scratchpads::multimodality::MultimodalElement { + m_type: "text".to_string(), + m_content: content, + }); + } + parts.append(&mut images); + ChatContent::Multimodal(parts) + } else { + ChatContent::SimpleText(content) + }; stream_back_to_user.push_in_json(json!(msg)); new_messages.push(msg); } @@ -168,47 +205,6 @@ pub async fn run_at_commands_locally( (new_messages, any_context_produced) } -pub async fn run_at_commands_remotely( - ccx: Arc>, - model_id: &str, - maxgen: usize, - original_messages: Vec, - stream_back_to_user: &mut HasRagResults, -) -> Result<(Vec, bool), String> { - let (gcx, n_ctx, subchat_tool_parameters, postprocess_parameters, chat_id) = { - let ccx_locked = ccx.lock().await; - ( - ccx_locked.global_context.clone(), - ccx_locked.n_ctx, - ccx_locked.subchat_tool_parameters.clone(), - ccx_locked.postprocess_parameters.clone(), - ccx_locked.chat_id.clone() - ) - }; - - let post = CommandExecutePost { - messages: original_messages, - n_ctx, - maxgen, - subchat_tool_parameters, - postprocess_parameters, - model_name: model_id.to_string(), - chat_id: chat_id.clone(), - }; - - let port = docker_container_get_host_lsp_port_to_connect(gcx.clone(), &chat_id).await?; - tracing::info!("run_at_commands_remotely: connecting to port {}", port); - - let url = format!("http://localhost:{port}/v1/at-command-execute"); - let response: CommandExecuteResponse = http_post_json(&url, &post).await?; - - for msg in response.messages_to_stream_back { - stream_back_to_user.push_in_json(msg); - } - - Ok((response.messages, response.any_context_produced)) -} - pub async fn correct_at_arg( ccx: Arc>, param: &Box, @@ -226,7 +222,8 @@ pub async fn correct_at_arg( } }; if !param.is_value_valid(ccx.clone(), &completion).await { - arg.ok = false; arg.reason = Some("incorrect argument; completion did not help".to_string()); + arg.ok = false; + arg.reason = Some("incorrect argument; completion did not help".to_string()); return; } arg.text = completion; @@ -237,7 +234,7 @@ pub async fn execute_at_commands_in_query( query: &mut String, ) -> (Vec, Vec) { let at_commands = ccx.lock().await.at_commands.clone(); - let at_command_names = at_commands.keys().map(|x|x.clone()).collect::>(); + let at_command_names = at_commands.keys().map(|x| x.clone()).collect::>(); let mut context_enums = vec![]; let mut highlight_members = vec![]; let mut clips: Vec<(String, usize, usize)> = vec![]; @@ -246,23 +243,46 @@ pub async fn execute_at_commands_in_query( for (w_idx, (word, pos1, pos2)) in words.iter().enumerate() { let cmd = match at_commands.get(word) { Some(c) => c, - None => { continue; } + None => { + continue; + } }; - let args = words.iter().skip(w_idx + 1).map(|x|x.clone()).collect::>(); + let args = words + .iter() + .skip(w_idx + 1) + .map(|x| x.clone()) + .collect::>(); let mut cmd_member = AtCommandMember::new("cmd".to_string(), word.clone(), *pos1, *pos2); let mut arg_members = vec![]; - for (text, pos1, pos2) in args.iter().map(|x|x.clone()) { - if at_command_names.contains(&text) { break; } + for (text, pos1, pos2) in args.iter().map(|x| x.clone()) { + if at_command_names.contains(&text) { + break; + } // TODO: break if there's \n\n - arg_members.push(AtCommandMember::new("arg".to_string(), text.clone(), pos1, pos2)); + arg_members.push(AtCommandMember::new( + 
"arg".to_string(), + text.clone(), + pos1, + pos2, + )); } - match cmd.at_execute(ccx.clone(), &mut cmd_member, &mut arg_members).await { + match cmd + .at_execute(ccx.clone(), &mut cmd_member, &mut arg_members) + .await + { Ok((res, text_on_clip)) => { context_enums.extend(res); - clips.push((text_on_clip, cmd_member.pos1, arg_members.last().map(|x|x.pos2).unwrap_or(cmd_member.pos2))); - }, + clips.push(( + text_on_clip, + cmd_member.pos1, + arg_members + .last() + .map(|x| x.pos2) + .unwrap_or(cmd_member.pos2), + )); + } Err(e) => { cmd_member.ok = false; cmd_member.reason = Some(format!("incorrect argument; failed to complete: {}", e)); @@ -292,43 +312,83 @@ pub struct AtCommandMember { impl AtCommandMember { pub fn new(kind: String, text: String, pos1: usize, pos2: usize) -> Self { - Self { kind, text, pos1, pos2, ok: true, reason: None} + Self { + kind, + text, + pos1, + pos2, + ok: true, + reason: None, + } } } pub fn parse_words_from_line(line: &String) -> Vec<(String, usize, usize)> { - fn trim_punctuation(s: &str) -> String { - s.trim_end_matches(&['!', '.', ',', '?'][..]).to_string() + fn trim_punctuation(s: &str) -> &str { + s.trim_end_matches(&['!', '.', ',', '?'][..]) } - // let word_regex = Regex::new(r#"(@?[^ !?@\n]*)"#).expect("Invalid regex"); - // let word_regex = Regex::new(r#"(@?[^ !?@\n]+|\n|@)"#).expect("Invalid regex"); - let word_regex = Regex::new(r#"(@?\S*)"#).expect("Invalid regex"); // fixed windows + let word_regex = Regex::new(r"@?\S+").expect("Invalid regex"); let mut results = vec![]; - for cap in word_regex.captures_iter(line) { - if let Some(matched) = cap.get(1) { - let trimmed_match = trim_punctuation(&matched.as_str().to_string()); - results.push((trimmed_match.clone(), matched.start(), matched.start() + trimmed_match.len())); + for m in word_regex.find_iter(line) { + let trimmed = trim_punctuation(m.as_str()); + if !trimmed.is_empty() { + results.push(( + trimmed.to_string(), + m.start(), + m.start() + trimmed.len(), + )); } } results } - #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_words_from_line_with_link() { - let line = "Check out this link: https://doc.rust-lang.org/book/ch03-04-comments.html".to_string(); + let line = + "Check out this link: https://doc.rust-lang.org/book/ch03-04-comments.html".to_string(); let parsed_words = parse_words_from_line(&line); - let link = parsed_words.iter().find(|(word, _, _)| word == "https://doc.rust-lang.org/book/ch03-04-comments.html"); + let link = parsed_words + .iter() + .find(|(word, _, _)| word == "https://doc.rust-lang.org/book/ch03-04-comments.html"); assert!(link.is_some(), "The link should be parsed as a single word"); if let Some((word, _start, _end)) = link { assert_eq!(word, "https://doc.rust-lang.org/book/ch03-04-comments.html"); } } + + #[test] + fn test_parse_words_from_line_no_empty_tokens() { + let line = "hello world test @file".to_string(); + let parsed_words = parse_words_from_line(&line); + + for (word, _, _) in parsed_words.iter() { + assert!(!word.is_empty(), "No empty tokens should be produced"); + } + } + + #[test] + fn test_parse_words_from_line_long_input() { + let line = (0..1000).map(|i| format!("word{} ", i)).collect::(); + let parsed_words = parse_words_from_line(&line); + + assert!(parsed_words.len() < 2000, "Performance regression: too many tokens for long input"); + assert!(parsed_words.iter().all(|(w, _, _)| !w.is_empty()), "No empty tokens"); + } + + #[test] + fn test_parse_words_from_line_punctuation_trimming() { + let line = "@file.txt, src/main.rs! 
code?".to_string(); + let parsed_words = parse_words_from_line(&line); + + assert_eq!(parsed_words[0].0, "@file.txt"); + assert_eq!(parsed_words[1].0, "src/main.rs"); + assert_eq!(parsed_words[2].0, "code"); + } } diff --git a/refact-agent/engine/src/at_commands/mod.rs b/refact-agent/engine/src/at_commands/mod.rs index 385b7bbe2..772c17a6d 100644 --- a/refact-agent/engine/src/at_commands/mod.rs +++ b/refact-agent/engine/src/at_commands/mod.rs @@ -1,9 +1,8 @@ -pub mod execute_at; pub mod at_ast_definition; -pub mod at_ast_reference; pub mod at_commands; pub mod at_file; -pub mod at_web; -pub mod at_tree; -pub mod at_search; pub mod at_knowledge; +pub mod at_search; +pub mod at_tree; +pub mod at_web; +pub mod execute_at; diff --git a/refact-agent/engine/src/background_tasks.rs b/refact-agent/engine/src/background_tasks.rs index a537614e7..71969f077 100644 --- a/refact-agent/engine/src/background_tasks.rs +++ b/refact-agent/engine/src/background_tasks.rs @@ -6,7 +6,7 @@ use tokio::sync::RwLock as ARwLock; use tokio::task::JoinHandle; use crate::global_context::GlobalContext; - +use crate::knowledge_index::build_knowledge_index; pub struct BackgroundTasksHolder { tasks: Vec>, @@ -14,9 +14,7 @@ pub struct BackgroundTasksHolder { impl BackgroundTasksHolder { pub fn new(tasks: Vec>) -> Self { - BackgroundTasksHolder { - tasks - } + BackgroundTasksHolder { tasks } } pub fn push_back(&mut self, task: JoinHandle<()>) { @@ -24,8 +22,8 @@ impl BackgroundTasksHolder { } pub fn extend(&mut self, tasks: T) - where - T: IntoIterator>, + where + T: IntoIterator>, { self.tasks.extend(tasks); } @@ -39,25 +37,61 @@ impl BackgroundTasksHolder { } } -pub async fn start_background_tasks(gcx: Arc>, _config_dir: &PathBuf) -> BackgroundTasksHolder { +pub async fn start_background_tasks( + gcx: Arc>, + _config_dir: &PathBuf, +) -> BackgroundTasksHolder { + let gcx_for_knowledge_index = gcx.clone(); let mut bg = BackgroundTasksHolder::new(vec![ - tokio::spawn(crate::files_in_workspace::files_in_workspace_init_task(gcx.clone())), - tokio::spawn(crate::telemetry::basic_transmit::telemetry_background_task(gcx.clone())), - tokio::spawn(crate::snippets_transmit::tele_snip_background_task(gcx.clone())), - tokio::spawn(crate::vecdb::vdb_highlev::vecdb_background_reload(gcx.clone())), - tokio::spawn(crate::integrations::sessions::remove_expired_sessions_background_task(gcx.clone())), - tokio::spawn(crate::git::cleanup::git_shadow_cleanup_background_task(gcx.clone())), - tokio::spawn(crate::knowledge_graph::knowledge_cleanup_background_task(gcx.clone())), + tokio::spawn(crate::files_in_workspace::files_in_workspace_init_task( + gcx.clone(), + )), + tokio::spawn(crate::telemetry::basic_transmit::telemetry_background_task( + gcx.clone(), + )), + tokio::spawn(crate::snippets_transmit::tele_snip_background_task( + gcx.clone(), + )), + tokio::spawn(crate::vecdb::vdb_highlev::vecdb_background_reload( + gcx.clone(), + )), + tokio::spawn( + crate::integrations::sessions::remove_expired_sessions_background_task(gcx.clone()), + ), + tokio::spawn(crate::git::cleanup::git_shadow_cleanup_background_task( + gcx.clone(), + )), + tokio::spawn(crate::knowledge_graph::knowledge_cleanup_background_task( + gcx.clone(), + )), + tokio::spawn(crate::trajectory_memos::trajectory_memos_background_task( + gcx.clone(), + )), + tokio::spawn(crate::chat::start_agent_monitor(gcx.clone())), + tokio::spawn(crate::providers::oauth_refresh::oauth_token_refresh_background_task(gcx.clone())), + tokio::spawn(async move { + // Build in-memory knowledge index in 
background (best-effort). + let index = build_knowledge_index(gcx_for_knowledge_index.clone()).await; + *gcx_for_knowledge_index + .read() + .await + .knowledge_index + .lock() + .await = index; + tracing::info!("knowledge_index: built"); + }), ]); let ast = gcx.clone().read().await.ast_service.clone(); if let Some(ast_service) = ast { - bg.extend(crate::ast::ast_indexer_thread::ast_indexer_start(ast_service, gcx.clone()).await); + bg.extend( + crate::ast::ast_indexer_thread::ast_indexer_start(ast_service, gcx.clone()).await, + ); } let files_jsonl_path = gcx.clone().read().await.cmdline.files_jsonl_path.clone(); if !files_jsonl_path.is_empty() { - bg.extend(vec![ - tokio::spawn(crate::files_in_jsonl::reload_if_jsonl_changes_background_task(gcx.clone())) - ]); + bg.extend(vec![tokio::spawn( + crate::files_in_jsonl::reload_if_jsonl_changes_background_task(gcx.clone()), + )]); } bg } diff --git a/refact-agent/engine/src/call_validation.rs b/refact-agent/engine/src/call_validation.rs index 6c5d6fb6d..15d16f0ed 100644 --- a/refact-agent/engine/src/call_validation.rs +++ b/refact-agent/engine/src/call_validation.rs @@ -2,7 +2,6 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::hash::Hash; use axum::http::StatusCode; -use indexmap::IndexMap; use ropey::Rope; use crate::custom_error::ScratchError; @@ -27,34 +26,59 @@ pub struct CodeCompletionInputs { #[serde(rename_all = "lowercase")] #[derive(Default)] pub enum ReasoningEffort { + #[serde(alias = "none")] + NoReasoning, + Minimal, Low, #[default] Medium, High, + XHigh, + Max, } impl ReasoningEffort { - pub fn to_string(&self) -> String { format!("{:?}", self).to_lowercase() } + pub fn to_string(&self) -> String { + match self { + Self::NoReasoning => "none".to_string(), + other => format!("{:?}", other).to_lowercase(), + } + } + + pub fn from_str_opt(s: &str) -> Option { + match s.to_lowercase().as_str() { + "none" => Some(Self::NoReasoning), + "minimal" => Some(Self::Minimal), + "low" => Some(Self::Low), + "medium" => Some(Self::Medium), + "high" => Some(Self::High), + "xhigh" => Some(Self::XHigh), + "max" => Some(Self::Max), + _ => None, + } + } } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct SamplingParameters { #[serde(default)] - pub max_new_tokens: usize, // TODO: rename it to `max_completion_tokens` everywhere, including chat-js + pub max_new_tokens: usize, pub temperature: Option, - pub top_p: Option, // NOTE: deprecated + pub frequency_penalty: Option, + pub top_p: Option, #[serde(default)] pub stop: Vec, pub n: Option, #[serde(default)] pub boost_reasoning: bool, - // NOTE: use the following arguments for direct API calls #[serde(default)] - pub reasoning_effort: Option, // OpenAI style reasoning + pub reasoning_effort: Option, + #[serde(default)] + pub thinking_budget: Option, #[serde(default)] - pub thinking: Option, // Anthropic style reasoning + pub thinking: Option, #[serde(default)] - pub enable_thinking: Option, // Qwen style reasoning + pub enable_thinking: Option, } #[derive(Debug, Deserialize, Clone)] @@ -110,12 +134,14 @@ pub fn code_completion_post_validate( Ok(()) } -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct ContextFile { pub file_name: String, pub file_content: String, pub line1: usize, // starts from 1, zero means non-valid pub line2: usize, // starts from 1 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub file_rev: Option, #[serde(default, skip_serializing)] pub symbols: Vec, 
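The expanded `ReasoningEffort` enum above accepts "none" as an alias for `NoReasoning` and otherwise lowercases variant names in both directions. A minimal round-trip sketch of that contract (not part of this patch; it assumes the enum also derives `Debug`, which its `to_string()` already relies on):

```rust
use crate::call_validation::ReasoningEffort;

// Sketch only: expected parse/print behaviour of the reworked ReasoningEffort.
fn reasoning_effort_roundtrip_sketch() {
    // "none" round-trips through the dedicated NoReasoning variant.
    assert!(matches!(ReasoningEffort::from_str_opt("none"), Some(ReasoningEffort::NoReasoning)));
    assert_eq!(ReasoningEffort::NoReasoning.to_string(), "none");

    // Other variants are matched case-insensitively and printed lowercase.
    assert!(matches!(ReasoningEffort::from_str_opt("XHIGH"), Some(ReasoningEffort::XHigh)));
    assert_eq!(ReasoningEffort::XHigh.to_string(), "xhigh");

    // Unknown strings yield None instead of silently falling back to Medium.
    assert!(ReasoningEffort::from_str_opt("ultra").is_none());
}
```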
#[serde(default = "default_gradient_type_value", skip_serializing)] @@ -126,7 +152,25 @@ pub struct ContextFile { pub skip_pp: bool, // if true, skip postprocessing compression for this file } -fn default_gradient_type_value() -> i32 { -1 } +impl Default for ContextFile { + fn default() -> Self { + Self { + file_name: String::new(), + file_content: String::new(), + line1: 0, + line2: 0, + file_rev: None, + symbols: Vec::new(), + gradient_type: -1, + usefulness: 0.0, + skip_pp: false, + } + } +} + +fn default_gradient_type_value() -> i32 { + -1 +} #[derive(Debug, Clone)] pub enum ContextEnum { @@ -140,12 +184,29 @@ pub struct ChatToolFunction { pub name: String, } +impl ChatToolFunction { + /// Parse arguments as a JSON object, normalizing empty/non-object values to `{}`. + /// + /// LLMs sometimes emit empty strings, `""`, `null`, or other non-object JSON + /// as tool arguments (especially on truncated responses). This method treats any + /// arguments string that doesn't look like a JSON object as equivalent to `{}`. + pub fn parse_args(&self) -> Result, serde_json::Error> { + let trimmed = self.arguments.trim(); + let args_str = if trimmed.starts_with('{') { trimmed } else { "{}" }; + serde_json::from_str(args_str) + } +} + #[derive(Debug, Serialize, Deserialize, Clone)] pub struct ChatToolCall { pub id: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub index: Option, pub function: ChatToolFunction, #[serde(rename = "type")] pub tool_type: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extra_content: Option, } #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] @@ -153,6 +214,7 @@ pub struct ChatToolCall { pub enum ChatContent { SimpleText(String), Multimodal(Vec), + ContextFiles(Vec), } impl Default for ChatContent { @@ -161,20 +223,41 @@ impl Default for ChatContent { } } +#[derive(Debug, Serialize, Deserialize, Clone, Default)] +pub struct MeteringUsd { + pub prompt_usd: f64, + pub generated_usd: f64, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub cache_read_usd: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub cache_creation_usd: Option, + pub total_usd: f64, +} + #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct ChatUsage { pub prompt_tokens: usize, pub completion_tokens: usize, - pub total_tokens: usize, // TODO: remove (can produce self-contradictory data when prompt+completion != total) + pub total_tokens: usize, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cache_creation_input_tokens", alias = "cache_creation_tokens")] + pub cache_creation_tokens: Option, + #[serde(default, skip_serializing_if = "Option::is_none", rename = "cache_read_input_tokens", alias = "cache_read_tokens")] + pub cache_read_tokens: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub metering_usd: Option, } #[derive(Debug, Serialize, Clone, Default)] pub struct ChatMessage { + #[serde(default, skip_serializing_if = "String::is_empty")] + pub message_id: String, pub role: String, pub content: ChatContent, #[serde(default, skip_serializing_if = "Option::is_none")] pub finish_reason: Option, #[serde(default, skip_serializing_if = "Option::is_none")] + pub reasoning_content: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] pub tool_calls: Option>, #[serde(default, skip_serializing_if = "String::is_empty")] pub tool_call_id: String, @@ -184,26 +267,28 @@ pub struct ChatMessage { pub usage: Option, #[serde(default, 
skip_serializing_if = "Vec::is_empty")] pub checkpoints: Vec, - #[serde(default, skip_serializing_if="Option::is_none")] + #[serde(default, skip_serializing_if = "Option::is_none")] pub thinking_blocks: Option>, + /// Citations from web search results + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub citations: Vec, + /// Server-executed content blocks (e.g., server_tool_use, web_search_tool_result) + /// that must be passed back verbatim in multi-turn conversations. + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub server_content_blocks: Vec, + /// Extra provider-specific fields that should be preserved round-trip + #[serde(default, skip_serializing_if = "serde_json::Map::is_empty", flatten)] + pub extra: serde_json::Map, #[serde(skip)] pub output_filter: Option, } -#[derive(Debug, Serialize, Deserialize, Clone, Copy)] -#[serde(rename_all = "lowercase")] -pub enum ModelType { - Chat, - Completion, - Embedding, -} - #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq)] #[serde(rename_all = "lowercase")] pub enum ChatModelType { Light, Default, - Thinking + Thinking, } impl Default for ChatModelType { @@ -219,47 +304,12 @@ pub struct SubchatParameters { #[serde(default)] pub subchat_model: String, pub subchat_n_ctx: usize, - #[serde(default)] - pub subchat_tokens_for_rag: usize, - #[serde(default)] - pub subchat_temperature: Option, - #[serde(default)] pub subchat_max_new_tokens: usize, - #[serde(default)] + pub subchat_temperature: Option, + pub subchat_tokens_for_rag: usize, pub subchat_reasoning_effort: Option, } -#[derive(Debug, Deserialize, Clone, Default)] -pub struct ChatPost { - pub messages: Vec, - #[serde(default)] - pub parameters: SamplingParameters, - #[serde(default)] - pub model: String, - pub stream: Option, - pub temperature: Option, - #[serde(default)] - pub max_tokens: Option, - #[serde(default)] - pub increase_max_tokens: bool, - #[serde(default)] - pub n: Option, - #[serde(default)] - pub tool_choice: Option, - #[serde(default)] - pub checkpoints_enabled: bool, - #[serde(default)] - pub only_deterministic_messages: bool, // means don't sample from the model - #[serde(default)] - pub subchat_tool_parameters: IndexMap, // tool_name: {model, allowed_context, temperature} - #[serde(default = "PostprocessSettings::new")] - pub postprocess_parameters: PostprocessSettings, - #[serde(default)] - pub meta: ChatMeta, - #[serde(default)] - pub style: Option, -} - #[derive(Debug, Serialize, Deserialize, Clone)] pub struct ChatMeta { #[serde(default)] @@ -268,16 +318,18 @@ pub struct ChatMeta { pub request_attempt_id: String, #[serde(default)] pub chat_remote: bool, - #[serde(default)] - pub chat_mode: ChatMode, + #[serde(default = "default_mode_id")] + pub chat_mode: String, #[serde(default)] pub current_config_file: String, #[serde(default = "default_true")] pub include_project_info: bool, #[serde(default)] pub context_tokens_cap: Option, - #[serde(default)] - pub use_compression: bool, +} + +fn default_mode_id() -> String { + "agent".to_string() } impl Default for ChatMeta { @@ -286,46 +338,105 @@ impl Default for ChatMeta { chat_id: String::new(), request_attempt_id: String::new(), chat_remote: false, - chat_mode: ChatMode::default(), + chat_mode: default_mode_id(), current_config_file: String::new(), include_project_info: true, context_tokens_cap: None, - use_compression: false, } } } -#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Copy)] -#[allow(non_camel_case_types)] -pub enum ChatMode { - NO_TOOLS, - EXPLORE, - AGENT, - 
CONFIGURE, - PROJECT_SUMMARY, -} - -impl ChatMode { - pub fn supports_checkpoints(self) -> bool { - match self { - ChatMode::NO_TOOLS => false, - ChatMode::AGENT | ChatMode::CONFIGURE | ChatMode::PROJECT_SUMMARY | ChatMode::EXPLORE => true, +/// Normalize a mode ID string (legacy enum values or dynamic mode IDs). +/// Handles uppercase legacy values and returns lowercase mode IDs. +/// Returns error if mode is empty or contains invalid characters. +pub fn normalize_mode_id(mode: &str) -> Result { + let trimmed = mode.trim(); + + if trimmed.is_empty() { + return Ok("agent".to_string()); + } + + // Validate characters: lowercase, digits, underscore, hyphen + if !trimmed.chars().all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_' || c == '-') { + // Try to normalize uppercase legacy values + let normalized = trimmed.to_lowercase(); + if !normalized.chars().all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_' || c == '-') { + return Err(format!("Invalid mode ID: '{}' contains invalid characters", trimmed)); } + return Ok(normalized); } + + Ok(trimmed.to_string()) +} - pub fn is_agentic(self) -> bool { - match self { - ChatMode::AGENT => true, - ChatMode::NO_TOOLS | ChatMode::EXPLORE | ChatMode::CONFIGURE | - ChatMode::PROJECT_SUMMARY => false, - } +/// Check if a mode ID is agentic (supports tool execution and knowledge enrichment). +pub fn is_agentic_mode_id(mode_id: &str) -> bool { + matches!(mode_id, "agent" | "task_planner" | "task_agent") +} + +/// Validate and canonicalize a mode ID with strict registry existence check. +/// Returns 422-compatible error if mode is invalid or doesn't exist in registry. +pub async fn validate_mode_for_request( + gcx: std::sync::Arc>, + mode: &str, +) -> Result { + let canonical = canonical_mode_id(mode)?; + + let mode_config = crate::yaml_configs::customization_registry::get_mode_config( + gcx, + &canonical, + None, + ).await; + + if mode_config.is_none() { + return Err(format!("Mode '{}' does not exist in registry", canonical)); } + + Ok(canonical) } -impl Default for ChatMode { - fn default() -> Self { - ChatMode::NO_TOOLS +/// Canonicalize a mode ID string with full validation and legacy mapping. +/// +/// This function: +/// 1. Normalizes format (lowercases, validates characters) +/// 2. Maps legacy enum values to canonical mode IDs +/// 3. Validates length (max 128 chars) +/// 4. 
Returns error for invalid input +/// +/// Examples: +/// - "AGENT" → "agent" +/// - "agent" → "agent" +/// - "CONFIGURE" → "configurator" +/// - "NO_TOOLS" → "explore" +/// - "my_custom_mode" → "my_custom_mode" +/// - "" → "agent" (default) +/// - "invalid!mode" → Err +pub fn canonical_mode_id(mode: &str) -> Result { + let trimmed = mode.trim(); + + if trimmed.is_empty() { + return Ok("agent".to_string()); + } + + if trimmed.len() > 128 { + return Err(format!("Mode ID too long: {} chars (max 128)", trimmed.len())); } + + let normalized = normalize_mode_id(trimmed)?; + + let canonical = match normalized.to_uppercase().as_str() { + "NO_TOOLS" => "explore".to_string(), + "EXPLORE" => "explore".to_string(), + "AGENT" => "agent".to_string(), + "CONFIGURE" | "CONFIGURATOR" => "configurator".to_string(), + "PROJECT_SUMMARY" => "project_summary".to_string(), + "PLAN" => "plan".to_string(), + "TASK_PLANNER" => "task_planner".to_string(), + "TASK_AGENT" => "task_agent".to_string(), + _ => normalized, + }; + + Ok(canonical) } fn default_true() -> bool { @@ -351,15 +462,15 @@ pub struct DiffChunk { #[serde(default)] pub struct PostprocessSettings { pub use_ast_based_pp: bool, - pub useful_background: f32, // first, fill usefulness of all lines with this - pub useful_symbol_default: f32, // when a symbol present, set usefulness higher + pub useful_background: f32, // first, fill usefulness of all lines with this + pub useful_symbol_default: f32, // when a symbol present, set usefulness higher // search results fill usefulness as it passed from outside - pub downgrade_parent_coef: f32, // goto parent from search results and mark it useful, with this coef - pub downgrade_body_coef: f32, // multiply body usefulness by this, so it's less useful than the declaration + pub downgrade_parent_coef: f32, // goto parent from search results and mark it useful, with this coef + pub downgrade_body_coef: f32, // multiply body usefulness by this, so it's less useful than the declaration pub comments_propagate_up_coef: f32, // mark comments above a symbol as useful, with this coef pub close_small_gaps: bool, - pub take_floor: f32, // take/dont value - pub max_files_n: usize, // don't produce more than n files in output + pub take_floor: f32, // take/dont value + pub max_files_n: usize, // don't produce more than n files in output } impl Default for PostprocessSettings { @@ -509,4 +620,91 @@ mod tests { }; assert!(code_completion_post_validate(&post).is_err()); } + + fn make_tool_fn(arguments: &str) -> ChatToolFunction { + ChatToolFunction { + arguments: arguments.to_string(), + name: "test_tool".to_string(), + } + } + + #[test] + fn test_parse_args_valid_object() { + let f = make_tool_fn(r#"{"key": "value", "num": 42}"#); + let args = f.parse_args().unwrap(); + assert_eq!(args["key"], "value"); + assert_eq!(args["num"], 42); + } + + #[test] + fn test_parse_args_empty_object() { + let f = make_tool_fn("{}"); + let args = f.parse_args().unwrap(); + assert!(args.is_empty()); + } + + #[test] + fn test_parse_args_empty_string() { + let f = make_tool_fn(""); + let args = f.parse_args().unwrap(); + assert!(args.is_empty()); + } + + #[test] + fn test_parse_args_whitespace_only() { + let f = make_tool_fn(" \n\t "); + let args = f.parse_args().unwrap(); + assert!(args.is_empty()); + } + + #[test] + fn test_parse_args_json_empty_string_literal() { + // LLM sends "" (two quote chars) — valid JSON string, not an object + let f = make_tool_fn(r#""""#); + let args = f.parse_args().unwrap(); + assert!(args.is_empty()); + } + + #[test] 
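The tests that follow pin down `ChatToolFunction::parse_args` edge cases; a hypothetical caller-side sketch (not part of this patch, and `tool_args_or_empty` is an illustrative name) of how a tool dispatcher can lean on that normalization:

```rust
use crate::call_validation::ChatToolFunction;

// Sketch: anything that is not a JSON object ("", "null", "\"\"", an array) comes back
// as an empty map from parse_args(); only text starting with '{' that fails to parse
// is surfaced as an error, which is downgraded to {} here with a warning.
fn tool_args_or_empty(f: &ChatToolFunction) -> serde_json::Map<String, serde_json::Value> {
    f.parse_args().unwrap_or_else(|e| {
        tracing::warn!("malformed tool arguments for {}: {}", f.name, e);
        serde_json::Map::new()
    })
}
```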
+    fn test_parse_args_json_null() {
+        let f = make_tool_fn("null");
+        let args = f.parse_args().unwrap();
+        assert!(args.is_empty());
+    }
+
+    #[test]
+    fn test_parse_args_json_array() {
+        // An array is not an object — should normalize to {}
+        let f = make_tool_fn("[1, 2, 3]");
+        let args = f.parse_args().unwrap();
+        assert!(args.is_empty());
+    }
+
+    #[test]
+    fn test_parse_args_padded_with_whitespace() {
+        let f = make_tool_fn(r#" { "a": 1 } "#);
+        let args = f.parse_args().unwrap();
+        assert_eq!(args["a"], 1);
+    }
+
+    #[test]
+    fn test_parse_args_invalid_json_object() {
+        // Starts with '{' but is malformed — should propagate the serde error
+        let f = make_tool_fn("{broken json");
+        assert!(f.parse_args().is_err());
+    }
+}
+
+pub fn deserialize_messages_from_post(
+    messages: &Vec<serde_json::Value>,
+) -> Result<Vec<ChatMessage>, ScratchError> {
+    let messages: Vec<ChatMessage> = messages
+        .iter()
+        .map(|x| serde_json::from_value(x.clone()))
+        .collect::<Result<Vec<_>, _>>()
+        .map_err(|e| {
+            tracing::error!("can't deserialize ChatMessage: {}", e);
+            ScratchError::new(StatusCode::BAD_REQUEST, format!("JSON problem: {}", e))
+        })?;
+    Ok(messages)
 }
diff --git a/refact-agent/engine/src/caps/caps.rs b/refact-agent/engine/src/caps/caps.rs
index 433586279..69ddce9e9 100644
--- a/refact-agent/engine/src/caps/caps.rs
+++ b/refact-agent/engine/src/caps/caps.rs
@@ -1,3 +1,4 @@
+use std::collections::HashMap;
 use std::sync::Arc;
 
 use indexmap::IndexMap;
@@ -10,9 +11,14 @@ use tracing::{info, warn};
 use crate::custom_error::MapErrToString;
 use crate::global_context::CommandLine;
 use crate::global_context::GlobalContext;
-use crate::caps::providers::{add_models_to_caps, read_providers_d, resolve_provider_api_key,
-    post_process_provider, CapsProvider};
-use crate::caps::self_hosted::SelfHostedCaps;
+use crate::caps::providers::{
+    add_models_to_caps, read_providers_d, resolve_provider_api_key, post_process_provider,
+    CapsProvider,
+};
+use crate::providers::config::ProviderDefaults;
+use crate::caps::model_caps::{ModelCapabilities, get_model_caps, resolve_model_caps};
+use crate::llm::WireFormat;
+use crate::providers::traits::AvailableModel;
 
 pub const CAPS_FILENAME: &str = "refact-caps";
 pub const CAPS_FILENAME_FALLBACK: &str = "coding_assistant_caps.json";
@@ -34,13 +40,19 @@ pub struct BaseModelRecord {
     #[serde(default, skip_serializing)]
     pub endpoint_style: String,
     #[serde(default, skip_serializing)]
+    pub wire_format: WireFormat,
+    #[serde(default, skip_serializing)]
     pub api_key: String,
     #[serde(default, skip_serializing)]
+    pub auth_token: String,
+    #[serde(default, skip_serializing)]
     pub tokenizer_api_key: String,
     #[serde(default, skip_serializing)]
     pub support_metadata: bool,
     #[serde(default, skip_serializing)]
+    pub extra_headers: std::collections::HashMap<String, String>,
+    #[serde(default, skip_serializing)]
     pub similar_models: Vec<String>,
     #[serde(default)]
     pub tokenizer: String,
@@ -49,6 +61,19 @@ pub struct BaseModelRecord {
     pub enabled: bool,
     #[serde(default)]
     pub experimental: bool,
+
+    /// Use max_completion_tokens instead of max_tokens (required for OpenAI o1/o3 models)
+    #[serde(default)]
+    pub supports_max_completion_tokens: bool,
+
+    /// Treat stream EOF as completion (for endpoints that don't send explicit Done signal)
+    #[serde(default)]
+    pub eof_is_done: bool,
+
+    /// Enable Anthropic's server-side web_search tool
+    #[serde(default)]
+    pub supports_web_search: bool,
+
     // Fields used for Config/UI management
     #[serde(skip_deserializing)]
     pub removable: bool,
@@ -56,7 +81,9 @@ pub struct BaseModelRecord {
     pub user_configured: bool,
 }
 
-fn default_true() -> bool 
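`deserialize_messages_from_post` above turns raw JSON values into `ChatMessage` and maps any failure to a BAD_REQUEST `ScratchError`; a hypothetical handler-side sketch (not part of this patch; the `"messages"` field lookup and the function name are illustrative assumptions):

```rust
use crate::call_validation::{deserialize_messages_from_post, ChatMessage};
use crate::custom_error::ScratchError;

// Sketch: keep messages as raw serde_json::Value in the POST body and convert them
// in one place, so a malformed message becomes a 400-style error instead of a panic
// deeper in the chat pipeline.
fn messages_from_body(body: &serde_json::Value) -> Result<Vec<ChatMessage>, ScratchError> {
    let raw: Vec<serde_json::Value> = body
        .get("messages")
        .and_then(|v| v.as_array())
        .cloned()
        .unwrap_or_default();
    deserialize_messages_from_post(&raw)
}
```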
{ true } +fn default_true() -> bool { + true +} pub trait HasBaseModelRecord { fn base(&self) -> &BaseModelRecord; @@ -68,8 +95,10 @@ pub struct ChatModelRecord { #[serde(flatten)] pub base: BaseModelRecord, + #[allow(dead_code)] // Deserialized from API but not used internally #[serde(default = "default_chat_scratchpad", skip_serializing)] pub scratchpad: String, + #[allow(dead_code)] // Deserialized from API but not used internally #[serde(default, skip_serializing)] pub scratchpad_patch: serde_json::Value, @@ -82,18 +111,60 @@ pub struct ChatModelRecord { #[serde(default)] pub supports_agent: bool, #[serde(default)] - pub supports_reasoning: Option, + pub reasoning_effort_options: Option>, + #[serde(default)] + pub supports_thinking_budget: bool, #[serde(default)] - pub supports_boost_reasoning: bool, + pub supports_adaptive_thinking_budget: bool, + #[serde(default)] + pub max_thinking_tokens: Option, #[serde(default)] pub default_temperature: Option, + #[serde(default)] + pub default_frequency_penalty: Option, + #[serde(default)] + pub default_max_tokens: Option, + #[serde(default)] + pub max_output_tokens: Option, + #[serde(default)] + pub supports_strict_tools: bool, + #[serde(default = "default_true")] + pub supports_temperature: bool, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub available_providers: Vec, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub selected_provider: Option, } -pub fn default_chat_scratchpad() -> String { "PASSTHROUGH".to_string() } +pub fn default_chat_scratchpad() -> String { + String::new() +} + +impl ChatModelRecord { + pub fn has_reasoning_support(&self) -> bool { + self.reasoning_effort_options.is_some() + || self.supports_thinking_budget + || self.supports_adaptive_thinking_budget + } + + pub fn reasoning_type_string(&self) -> Option { + if self.supports_adaptive_thinking_budget { + Some("anthropic_effort".to_string()) + } else if self.supports_thinking_budget { + Some("anthropic_budget".to_string()) + } else { + None + } + } +} impl HasBaseModelRecord for ChatModelRecord { - fn base(&self) -> &BaseModelRecord { &self.base } - fn base_mut(&mut self) -> &mut BaseModelRecord { &mut self.base } + fn base(&self) -> &BaseModelRecord { + &self.base + } + fn base_mut(&mut self) -> &mut BaseModelRecord { + &mut self.base + } } #[derive(Debug, Serialize, Clone, Deserialize, Default)] @@ -119,31 +190,24 @@ pub enum CompletionModelFamily { DeepseekCoder, } -impl CompletionModelFamily { - pub fn to_string(self) -> String { - serde_json::to_value(self).ok() - .and_then(|v| v.as_str().map(|s| s.to_string())).unwrap_or_default() - } - - pub fn all_variants() -> Vec { - vec![ - CompletionModelFamily::Qwen2_5CoderBase, - CompletionModelFamily::Starcoder, - CompletionModelFamily::DeepseekCoder, - ] - } +pub fn default_completion_scratchpad() -> String { + "FIM-PSM".to_string() } -pub fn default_completion_scratchpad() -> String { "REPLACE_PASSTHROUGH".to_string() } - -pub fn default_completion_scratchpad_patch() -> serde_json::Value { serde_json::json!({ - "context_format": "chat", - "rag_ratio": 0.5 -}) } +pub fn default_completion_scratchpad_patch() -> serde_json::Value { + serde_json::json!({ + "context_format": "chat", + "rag_ratio": 0.5 + }) +} impl HasBaseModelRecord for CompletionModelRecord { - fn base(&self) -> &BaseModelRecord { &self.base } - fn base_mut(&mut self) -> &mut BaseModelRecord { &mut self.base } + fn base(&self) -> &BaseModelRecord { + &self.base + } + fn base_mut(&mut self) -> &mut BaseModelRecord { + &mut 
self.base
+    }
 }
 
 #[derive(Debug, Serialize, Clone, Default, PartialEq)]
@@ -156,31 +220,55 @@ pub struct EmbeddingModelRecord {
     pub embedding_batch: usize,
 }
 
-pub fn default_rejection_threshold() -> f32 { 0.63 }
+pub fn default_rejection_threshold() -> f32 {
+    0.63
+}
 
-pub fn default_embedding_batch() -> usize { 64 }
+pub fn default_embedding_batch() -> usize {
+    64
+}
 
 impl HasBaseModelRecord for EmbeddingModelRecord {
-    fn base(&self) -> &BaseModelRecord { &self.base }
-    fn base_mut(&mut self) -> &mut BaseModelRecord { &mut self.base }
+    fn base(&self) -> &BaseModelRecord {
+        &self.base
+    }
+    fn base_mut(&mut self) -> &mut BaseModelRecord {
+        &mut self.base
+    }
 }
 
 impl EmbeddingModelRecord {
     pub fn is_configured(&self) -> bool {
-        !self.base.name.is_empty() && (self.embedding_size > 0 || self.embedding_batch > 0 || self.base.n_ctx > 0)
+        !self.base.name.is_empty()
+            && (self.embedding_size > 0 || self.embedding_batch > 0 || self.base.n_ctx > 0)
     }
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone, Default)]
+#[derive(Debug, Serialize, Deserialize, Clone)]
 pub struct CapsMetadata {
+    #[serde(default = "default_pricing")]
     pub pricing: serde_json::Value,
-    pub features: Vec<String>
+    #[serde(default)]
+    pub features: Vec<String>,
+}
+
+fn default_pricing() -> serde_json::Value {
+    serde_json::json!({})
+}
+
+impl Default for CapsMetadata {
+    fn default() -> Self {
+        Self {
+            pricing: default_pricing(),
+            features: Vec::new(),
+        }
+    }
 }
 
 #[derive(Debug, Serialize, Deserialize, Clone, Default)]
 pub struct CodeAssistantCaps {
     #[serde(deserialize_with = "normalize_string")]
-    pub cloud_name: String,  // "refact" or "refact_self_hosted"
+    pub cloud_name: String,
 
     #[serde(default = "default_telemetry_basic_dest")]
     pub telemetry_basic_dest: String,
@@ -188,7 +276,7 @@ pub struct CodeAssistantCaps {
     pub telemetry_basic_retrieve_my_own: String,
 
     #[serde(skip_deserializing)]
-    pub completion_models: IndexMap<String, Arc<CompletionModelRecord>>,  // keys are "provider/model"
+    pub completion_models: IndexMap<String, Arc<CompletionModelRecord>>,
     #[serde(skip_deserializing)]
     pub chat_models: IndexMap<String, Arc<ChatModelRecord>>,
     #[serde(skip_deserializing)]
@@ -198,16 +286,22 @@ pub struct CodeAssistantCaps {
     pub defaults: DefaultModels,
 
     #[serde(default)]
-    pub caps_version: i64,  // need to reload if it increases on server, that happens when server configuration changes
+    pub caps_version: i64,
     #[serde(default)]
-    pub customization: String,  // on self-hosting server, allows to customize yaml_configs & friends for all engineers
+    pub customization: String,
 
     #[serde(default = "default_hf_tokenizer_template")]
-    pub hf_tokenizer_template: String, // template for HuggingFace tokenizer URLs
+    pub hf_tokenizer_template: String,
+
+    #[serde(default)]
+    pub metadata: CapsMetadata,
+
+    #[serde(skip)]
+    pub model_caps: Arc<HashMap<String, ModelCapabilities>>,
 
-    #[serde(default)]  // Need for metadata from cloud, e.g. 
pricing for models; used only in chat-js - pub metadata: CapsMetadata + #[serde(skip)] + pub user_defaults: ProviderDefaults, } fn default_telemetry_retrieve_my_own() -> String { @@ -222,14 +316,28 @@ fn default_telemetry_basic_dest() -> String { "https://www.smallcloud.ai/v1/telemetry-basic".to_string() } -pub fn normalize_string<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result { +pub fn normalize_string<'de, D: serde::Deserializer<'de>>( + deserializer: D, +) -> Result { let s: String = String::deserialize(deserializer)?; - Ok(s.chars().map(|c| if c.is_alphanumeric() { c.to_ascii_lowercase() } else { '_' }).collect()) + Ok(s.chars() + .map(|c| { + if c.is_alphanumeric() { + c.to_ascii_lowercase() + } else { + '_' + } + }) + .collect()) } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct DefaultModels { - #[serde(default, alias = "code_completion_default_model", alias = "completion_model")] + #[serde( + default, + alias = "code_completion_default_model", + alias = "completion_model" + )] pub completion_default_model: String, #[serde(default, alias = "code_chat_default_model", alias = "chat_model")] pub chat_default_model: String, @@ -273,14 +381,20 @@ pub async fn load_caps_value_from_url( gcx: Arc>, ) -> Result<(serde_json::Value, String), String> { let caps_urls = if cmdline.address_url.to_lowercase() == "refact" { - vec!["https://app.refact.ai/coding_assistant_caps.json".to_string()] + vec!["https://inference.smallcloud.ai/coding_assistant_caps.json".to_string()] } else { let base_url = Url::parse(&cmdline.address_url) .map_err(|_| "failed to parse address url".to_string())?; vec![ - base_url.join(&CAPS_FILENAME).map_err(|_| "failed to join caps URL".to_string())?.to_string(), - base_url.join(&CAPS_FILENAME_FALLBACK).map_err(|_| "failed to join fallback caps URL".to_string())?.to_string(), + base_url + .join(&CAPS_FILENAME) + .map_err(|_| "failed to join caps URL".to_string())? + .to_string(), + base_url + .join(&CAPS_FILENAME_FALLBACK) + .map_err(|_| "failed to join fallback caps URL".to_string())? 
+ .to_string(), ] }; @@ -288,8 +402,18 @@ pub async fn load_caps_value_from_url( let mut headers = reqwest::header::HeaderMap::new(); if !cmdline.api_key.is_empty() { - headers.insert(reqwest::header::AUTHORIZATION, reqwest::header::HeaderValue::from_str(&format!("Bearer {}", cmdline.api_key)).unwrap()); - headers.insert(reqwest::header::USER_AGENT, reqwest::header::HeaderValue::from_str(&format!("refact-lsp {}", crate::version::build::PKG_VERSION)).unwrap()); + headers.insert( + reqwest::header::AUTHORIZATION, + reqwest::header::HeaderValue::from_str(&format!("Bearer {}", cmdline.api_key)).unwrap(), + ); + headers.insert( + reqwest::header::USER_AGENT, + reqwest::header::HeaderValue::from_str(&format!( + "refact-lsp {}", + crate::version::build::PKG_VERSION + )) + .unwrap(), + ); } let mut last_status = 0; @@ -297,7 +421,8 @@ pub async fn load_caps_value_from_url( for url in &caps_urls { info!("fetching caps from {}", url); - let response = http_client.get(url) + let response = http_client + .get(url) .headers(headers.clone()) .send() .await @@ -310,7 +435,10 @@ pub async fn load_caps_value_from_url( return Ok((json_value, url.clone())); } last_response_json = Some(json_value.clone()); - warn!("status={}; server responded with:\n{}", last_status, json_value); + warn!( + "status={}; server responded with:\n{}", + last_status, json_value + ); } } @@ -323,33 +451,476 @@ pub async fn load_caps_value_from_url( Err(format!("cannot fetch caps, status={}", last_status)) } +/// Build ChatModelRecord from an AvailableModel and provider runtime info +fn build_chat_model_record( + provider_name: &str, + model: &AvailableModel, + model_caps: &HashMap, + runtime_wire_format: WireFormat, + runtime_endpoint: &str, + runtime_api_key: &str, + runtime_auth_token: &str, + runtime_tokenizer_api_key: &str, + runtime_support_metadata: bool, + runtime_extra_headers: &HashMap, +) -> ChatModelRecord { + let prefix = format!("{}/", provider_name); + let model_id = if model.id.starts_with(&prefix) { + model.id.clone() + } else { + format!("{}/{}", provider_name, model.id) + }; + + let resolved_caps = resolve_model_caps(model_caps, &model_id) + .or_else(|| { + if model_id.starts_with("openrouter/") { + None + } else { + resolve_model_caps(model_caps, &model.id) + } + }); + + let ( + n_ctx, + supports_tools, + supports_multimodality, + reasoning_effort_options, + supports_thinking_budget, + supports_adaptive_thinking_budget, + tokenizer, + supports_clicks, + max_output_tokens, + ) = if let Some(ref resolved) = resolved_caps { + let caps = &resolved.caps; + if model.is_custom { + let clamped_n_ctx = if caps.n_ctx > 0 { model.n_ctx.min(caps.n_ctx) } else { model.n_ctx }; + let clamped_max_output = model.max_output_tokens.map(|v| { + if caps.max_output_tokens > 0 { v.min(caps.max_output_tokens) } else { v } + }); + let tok = model.tokenizer.clone().unwrap_or_else(|| caps.tokenizer.clone()); + ( + clamped_n_ctx, + model.supports_tools, + model.supports_multimodality, + model.reasoning_effort_options.clone(), + model.supports_thinking_budget, + model.supports_adaptive_thinking_budget, + tok, + caps.supports_clicks, + clamped_max_output, + ) + } else { + let effective_n_ctx = if model.n_ctx > 0 && caps.n_ctx > 0 { + model.n_ctx.min(caps.n_ctx) + } else if caps.n_ctx > 0 { + caps.n_ctx + } else { + model.n_ctx + }; + let effective_max_output = if caps.max_output_tokens > 0 { + model.max_output_tokens + .map(|v| v.min(caps.max_output_tokens)) + .or(Some(caps.max_output_tokens)) + } else { + model.max_output_tokens + }; + ( + 
effective_n_ctx, + caps.supports_tools, + caps.supports_vision, + caps.reasoning_effort_options.clone(), + caps.supports_thinking_budget, + caps.supports_adaptive_thinking_budget, + caps.tokenizer.clone(), + caps.supports_clicks, + effective_max_output, + ) + } + } else { + ( + model.n_ctx, + model.supports_tools, + model.supports_multimodality, + model.reasoning_effort_options.clone(), + model.supports_thinking_budget, + model.supports_adaptive_thinking_budget, + model.tokenizer.clone().unwrap_or_else(|| "fake".to_string()), + false, + model.max_output_tokens, + ) + }; + + let supports_agent = supports_tools; + let endpoint = runtime_endpoint.replace("$MODEL", &model.id); + + let endpoint_style = match runtime_wire_format { + WireFormat::AnthropicMessages => "anthropic", + _ => "openai", + } + .to_string(); + + ChatModelRecord { + base: BaseModelRecord { + n_ctx, + name: model.id.clone(), + id: model_id, + endpoint, + endpoint_style, + wire_format: runtime_wire_format, + api_key: runtime_api_key.to_string(), + auth_token: runtime_auth_token.to_string(), + tokenizer_api_key: runtime_tokenizer_api_key.to_string(), + support_metadata: runtime_support_metadata, + extra_headers: runtime_extra_headers.clone(), + similar_models: Vec::new(), + tokenizer, + enabled: model.enabled, + experimental: false, + supports_max_completion_tokens: resolved_caps + .as_ref() + .map(|r| r.caps.supports_max_completion_tokens) + .unwrap_or(false), + eof_is_done: false, + supports_web_search: resolved_caps + .as_ref() + .map(|r| r.caps.supports_web_search) + .unwrap_or(false), + removable: model.is_custom, + user_configured: model.is_custom, + }, + scratchpad: String::new(), + scratchpad_patch: serde_json::Value::Null, + supports_tools, + supports_multimodality, + supports_clicks, + supports_agent, + reasoning_effort_options, + supports_thinking_budget, + supports_adaptive_thinking_budget, + max_thinking_tokens: resolved_caps + .as_ref() + .and_then(|r| r.caps.max_thinking_tokens), + default_temperature: resolved_caps + .as_ref() + .and_then(|r| r.caps.default_temperature), + default_frequency_penalty: None, + default_max_tokens: resolved_caps + .as_ref() + .and_then(|r| r.caps.default_max_tokens), + max_output_tokens, + supports_strict_tools: resolved_caps + .as_ref() + .map(|r| r.caps.supports_strict_tools) + .unwrap_or(false), + supports_temperature: resolved_caps + .as_ref() + .map(|r| r.caps.supports_temperature) + .unwrap_or(true), + available_providers: model.available_providers.clone(), + selected_provider: model.selected_provider.clone(), + + } +} + +pub async fn populate_chat_models_from_providers( + caps: &mut CodeAssistantCaps, + gcx: Arc>, +) { + let model_caps = &*caps.model_caps; + + let (http_client, providers_snapshot) = { + let gcx_locked = gcx.read().await; + let registry = gcx_locked.providers.read().await; + let snapshot: Vec> = + registry.iter().map(|(_, p)| p.clone_box()).collect(); + (gcx_locked.http_client.clone(), snapshot) + }; + + let mut pricing_map = caps.metadata.pricing.as_object_mut(); + + for provider in &providers_snapshot { + let runtime = match provider.build_runtime() { + Ok(r) => r, + Err(e) => { + warn!( + "Failed to build runtime for provider '{}': {}", + provider.name(), + e + ); + continue; + } + }; + + if !runtime.enabled { + continue; + } + + let available_models = provider.fetch_available_models(&http_client, model_caps).await; + + for model in available_models { + if !model.enabled { + continue; + } + + let chat_record = build_chat_model_record( + &runtime.name, + 
&model, + model_caps, + runtime.wire_format, + &runtime.chat_endpoint, + &runtime.api_key, + &runtime.auth_token, + &runtime.tokenizer_api_key, + runtime.support_metadata, + &runtime.extra_headers, + ); + + let model_id = chat_record.base.id.clone(); + + if let Some(ref pricing) = model.pricing { + if let Some(map) = pricing_map.as_mut() { + if let Ok(pricing_value) = serde_json::to_value(pricing) { + map.insert(model_id.clone(), pricing_value.clone()); + if !map.contains_key(&model.id) { + map.insert(model.id.clone(), pricing_value); + } + } + } + } + + caps.chat_models.insert(model_id, Arc::new(chat_record)); + } + } + + if !caps.chat_models.is_empty() { + let need_new_default = caps.defaults.chat_default_model.is_empty() + || !caps + .chat_models + .contains_key(&caps.defaults.chat_default_model); + + if need_new_default { + if let Some((first_model_id, _)) = caps.chat_models.first() { + info!("Auto-selecting default chat model: {}", first_model_id); + caps.defaults.chat_default_model = first_model_id.clone(); + } + } + + let need_new_light = caps.defaults.chat_light_model.is_empty() + || !caps + .chat_models + .contains_key(&caps.defaults.chat_light_model); + if need_new_light && !caps.defaults.chat_default_model.is_empty() { + info!( + "Light model '{}' not available, falling back to default '{}'", + caps.defaults.chat_light_model, caps.defaults.chat_default_model + ); + caps.defaults.chat_light_model = caps.defaults.chat_default_model.clone(); + } + + let need_new_thinking = caps.defaults.chat_thinking_model.is_empty() + || !caps + .chat_models + .contains_key(&caps.defaults.chat_thinking_model); + if need_new_thinking && !caps.defaults.chat_default_model.is_empty() { + info!( + "Thinking model '{}' not available, falling back to default '{}'", + caps.defaults.chat_thinking_model, caps.defaults.chat_default_model + ); + caps.defaults.chat_thinking_model = caps.defaults.chat_default_model.clone(); + } + } +} + +fn convert_self_hosted_caps_if_needed( + caps_value: serde_json::Value, + caps_url: &str, + cmdline_api_key: &str, +) -> Result { + let obj = match caps_value.as_object() { + Some(o) => o, + None => return Ok(caps_value), + }; + + let has_nested_chat = obj.get("chat").and_then(|v| v.get("models")).is_some(); + if !has_nested_chat { + return Ok(caps_value); + } + + let support_metadata = obj.get("support_metadata") + .and_then(|v| v.as_bool()) + .unwrap_or(false); + let tokenizer_endpoints = obj.get("tokenizer_endpoints") + .and_then(|v| v.as_object()) + .cloned() + .unwrap_or_default(); + + let mut chat_models = serde_json::Map::new(); + if let Some(chat) = obj.get("chat").and_then(|v| v.as_object()) { + let endpoint = chat.get("endpoint").and_then(|v| v.as_str()).unwrap_or(""); + if let Some(models) = chat.get("models").and_then(|v| v.as_object()) { + for (model_name, model_val) in models { + let mut record = model_val.clone(); + if let Some(rec) = record.as_object_mut() { + rec.insert("name".to_string(), serde_json::json!(model_name)); + let model_endpoint = endpoint.replace("$MODEL", model_name); + let full_endpoint = relative_to_full_url(caps_url, &model_endpoint) + .unwrap_or(model_endpoint); + rec.insert("endpoint".to_string(), serde_json::json!(full_endpoint)); + rec.insert("endpoint_style".to_string(), serde_json::json!("openai")); + rec.insert("enabled".to_string(), serde_json::json!(true)); + rec.insert("support_metadata".to_string(), serde_json::json!(support_metadata)); + if !cmdline_api_key.is_empty() { + rec.insert("api_key".to_string(), 
serde_json::json!(cmdline_api_key)); + } + if let Some(tok_url) = tokenizer_endpoints.get(model_name) { + if let Some(tok_str) = tok_url.as_str() { + let full_tok = relative_to_full_url(caps_url, tok_str) + .unwrap_or(tok_str.to_string()); + rec.insert("tokenizer".to_string(), serde_json::json!(full_tok)); + } + } + chat_models.insert(model_name.clone(), record); + } + } + } + } + + let mut completion_models = serde_json::Map::new(); + if let Some(completion) = obj.get("completion").and_then(|v| v.as_object()) { + let endpoint = completion.get("endpoint").and_then(|v| v.as_str()).unwrap_or(""); + if let Some(models) = completion.get("models").and_then(|v| v.as_object()) { + for (model_name, model_val) in models { + let mut record = model_val.clone(); + if let Some(rec) = record.as_object_mut() { + rec.insert("name".to_string(), serde_json::json!(model_name)); + let model_endpoint = endpoint.replace("$MODEL", model_name); + let full_endpoint = relative_to_full_url(caps_url, &model_endpoint) + .unwrap_or(model_endpoint); + rec.insert("endpoint".to_string(), serde_json::json!(full_endpoint)); + rec.insert("endpoint_style".to_string(), serde_json::json!("openai")); + rec.insert("enabled".to_string(), serde_json::json!(true)); + if !cmdline_api_key.is_empty() { + rec.insert("api_key".to_string(), serde_json::json!(cmdline_api_key)); + } + if let Some(tok_url) = tokenizer_endpoints.get(model_name) { + if let Some(tok_str) = tok_url.as_str() { + let full_tok = relative_to_full_url(caps_url, tok_str) + .unwrap_or(tok_str.to_string()); + rec.insert("tokenizer".to_string(), serde_json::json!(full_tok)); + } + } + completion_models.insert(model_name.clone(), record); + } + } + } + } + + let mut result = caps_value.clone(); + if let Some(result_obj) = result.as_object_mut() { + result_obj.insert("chat_models".to_string(), serde_json::Value::Object(chat_models)); + result_obj.insert("completion_models".to_string(), serde_json::Value::Object(completion_models)); + + if let Some(chat) = obj.get("chat").and_then(|v| v.as_object()) { + let chat_endpoint = chat.get("endpoint").and_then(|v| v.as_str()).unwrap_or(""); + let full_chat_endpoint = relative_to_full_url(caps_url, chat_endpoint) + .unwrap_or(chat_endpoint.to_string()); + result_obj.insert("chat_endpoint".to_string(), serde_json::json!(full_chat_endpoint)); + + if let Some(dm) = chat.get("default_model").and_then(|v| v.as_str()) { + if !dm.is_empty() { + result_obj.insert("chat_default_model".to_string(), serde_json::json!(dm)); + } + } + if let Some(dm) = chat.get("default_light_model").and_then(|v| v.as_str()) { + if !dm.is_empty() { + result_obj.insert("chat_light_model".to_string(), serde_json::json!(dm)); + } + } + if let Some(dm) = chat.get("default_thinking_model").and_then(|v| v.as_str()) { + if !dm.is_empty() { + result_obj.insert("chat_thinking_model".to_string(), serde_json::json!(dm)); + } + } + } + + if let Some(completion) = obj.get("completion").and_then(|v| v.as_object()) { + let comp_endpoint = completion.get("endpoint").and_then(|v| v.as_str()).unwrap_or(""); + let full_comp_endpoint = relative_to_full_url(caps_url, comp_endpoint) + .unwrap_or(comp_endpoint.to_string()); + result_obj.insert("completion_endpoint".to_string(), serde_json::json!(full_comp_endpoint)); + } + + if let Some(telem) = obj.get("telemetry_endpoints").and_then(|v| v.as_object()) { + if let Some(basic) = telem.get("telemetry_basic_endpoint").and_then(|v| v.as_str()) { + result_obj.insert("telemetry_basic_dest".to_string(), serde_json::json!(basic)); + } + if 
let Some(own) = telem.get("telemetry_basic_retrieve_my_own_endpoint").and_then(|v| v.as_str()) { + result_obj.insert("telemetry_basic_retrieve_my_own".to_string(), serde_json::json!(own)); + } + } + } + + Ok(result) +} + pub async fn load_caps( cmdline: crate::global_context::CommandLine, gcx: Arc>, ) -> Result, String> { let (config_dir, cmdline_api_key, experimental) = { let gcx_locked = gcx.read().await; - (gcx_locked.config_dir.clone(), gcx_locked.cmdline.api_key.clone(), gcx_locked.cmdline.experimental) + ( + gcx_locked.config_dir.clone(), + gcx_locked.cmdline.api_key.clone(), + gcx_locked.cmdline.experimental, + ) }; - let (caps_value, caps_url) = load_caps_value_from_url(cmdline, gcx).await?; + let addr = cmdline.address_url.trim().to_string(); + let is_refact = addr.eq_ignore_ascii_case("refact"); + let has_cloud_key = !cmdline_api_key.trim().is_empty(); + let skip_cloud = addr.is_empty() || (is_refact && !has_cloud_key); - let (mut caps, server_providers) = match serde_json::from_value::(caps_value.clone()) { - Ok(self_hosted_caps) => (self_hosted_caps.into_caps(&caps_url, &cmdline_api_key)?, Vec::new()), - Err(_) => { - let caps = serde_json::from_value::(caps_value.clone()) - .map_err_with_prefix("Failed to parse caps:")?; - let mut server_provider = serde_json::from_value::(caps_value) - .map_err_with_prefix("Failed to parse caps provider:")?; - resolve_relative_urls(&mut server_provider, &caps_url)?; - (caps, vec![server_provider]) + let (mut caps, server_providers) = if skip_cloud { + info!("Running in BYOK mode (local providers only), address_url={:?} has_key={}", addr, has_cloud_key); + (CodeAssistantCaps::default(), vec![]) + } else { + match load_caps_value_from_url(cmdline, gcx.clone()).await { + Ok((caps_value, caps_url)) => { + let caps_value = convert_self_hosted_caps_if_needed(caps_value, &caps_url, &cmdline_api_key)?; + + let mut caps = serde_json::from_value::(caps_value.clone()) + .map_err_with_prefix("Failed to parse caps:")?; + let mut server_provider = serde_json::from_value::(caps_value) + .map_err_with_prefix("Failed to parse caps provider:")?; + resolve_relative_urls(&mut server_provider, &caps_url)?; + if caps.cloud_name == "refact" { + server_provider.wire_format = WireFormat::Refact; + server_provider.support_metadata = true; + if let Some(pricing_obj) = caps.metadata.pricing.as_object() { + for model_name in pricing_obj.keys() { + if !server_provider.running_models.contains(model_name) { + server_provider.running_models.push(model_name.clone()); + } + } + } + } + + caps.telemetry_basic_dest = relative_to_full_url(&caps_url, &caps.telemetry_basic_dest)?; + caps.telemetry_basic_retrieve_my_own = + relative_to_full_url(&caps_url, &caps.telemetry_basic_retrieve_my_own)?; + + (caps, vec![server_provider]) + } + Err(e) => { + warn!("Cloud caps fetch failed ({}), falling back to local providers only", e); + (CodeAssistantCaps::default(), vec![]) + } } }; - caps.telemetry_basic_dest = relative_to_full_url(&caps_url, &caps.telemetry_basic_dest)?; - caps.telemetry_basic_retrieve_my_own = relative_to_full_url(&caps_url, &caps.telemetry_basic_retrieve_my_own)?; - - let (mut providers, error_log) = read_providers_d(server_providers, &config_dir, experimental).await; + let (mut providers, error_log) = + read_providers_d(server_providers, &config_dir, experimental).await; providers.retain(|p| p.enabled); for e in error_log { tracing::error!("{e}"); @@ -358,11 +929,164 @@ pub async fn load_caps( post_process_provider(provider, false, experimental); provider.api_key = 
resolve_provider_api_key(&provider, &cmdline_api_key); } + + let address_url = gcx.read().await.cmdline.address_url.clone(); + let model_caps_map = match get_model_caps(gcx.clone(), &address_url, false).await { + Ok(map) => map, + Err(e) => { + warn!("Failed to fetch model capabilities: {}, using empty map", e); + HashMap::new() + } + }; + caps.model_caps = Arc::new(model_caps_map); + if caps.cloud_name == "refact" { + let running_models: Vec = if let Some(pricing_obj) = caps.metadata.pricing.as_object() { + pricing_obj.keys().cloned().collect() + } else { + Vec::new() + }; + if !running_models.is_empty() { + let gcx_locked = gcx.write().await; + let mut registry = gcx_locked.providers.write().await; + if let Some(provider) = registry.get_mut("refact") { + provider.set_running_models(running_models); + } + drop(registry); + drop(gcx_locked); + } + } + + // Clear chat models from legacy CapsProviders that have a new ProviderTrait implementation. + // The new system (populate_chat_models_from_providers) is the sole source of truth for + // chat models — it respects enabled_models selection. Legacy running_models from YAML + // templates would otherwise bypass model selection, showing all template models. + // Only chat_models are cleared; completion_models and embedding_model are preserved + // since the new system doesn't handle those yet. + { + let gcx_locked = gcx.read().await; + let registry = gcx_locked.providers.read().await; + for p in &mut providers { + if registry.get(&p.name).is_some() && !p.chat_models.is_empty() { + info!( + "Clearing {} legacy chat models for provider '{}' — handled by new provider system", + p.chat_models.len(), + p.name + ); + p.chat_models.clear(); + } + } + } + add_models_to_caps(&mut caps, providers); + populate_chat_models_from_providers(&mut caps, gcx.clone()).await; + apply_model_caps_to_all_chat_models(&mut caps); + + match ProviderDefaults::load(&config_dir).await { + Ok(user_defaults) => { + let resolve_user_model = |model: &str, chat_models: &IndexMap>| -> Option { + if model.is_empty() { + return None; + } + if chat_models.contains_key(model) { + return Some(model.to_string()); + } + if !model.contains('/') { + for key in chat_models.keys() { + if let Some(name) = key.split('/').last() { + if name == model { + return Some(key.clone()); + } + } + } + } + None + }; + + if let Some(model) = &user_defaults.chat.model { + match resolve_user_model(model, &caps.chat_models) { + Some(resolved) => caps.defaults.chat_default_model = resolved, + None if !model.is_empty() => warn!( + "User default chat model '{}' not found in available models, ignoring", model + ), + _ => {} + } + } + if let Some(model) = &user_defaults.chat_light.model { + match resolve_user_model(model, &caps.chat_models) { + Some(resolved) => caps.defaults.chat_light_model = resolved, + None if !model.is_empty() => warn!( + "User default light model '{}' not found in available models, ignoring", model + ), + _ => {} + } + } + if let Some(model) = &user_defaults.chat_thinking.model { + match resolve_user_model(model, &caps.chat_models) { + Some(resolved) => caps.defaults.chat_thinking_model = resolved, + None if !model.is_empty() => warn!( + "User default thinking model '{}' not found in available models, ignoring", model + ), + _ => {} + } + } + caps.user_defaults = user_defaults; + } + Err(e) => { + warn!( + "Failed to load user defaults from providers.d/defaults.yaml: {}", + e + ); + } + } + + validate_default_models(&caps)?; Ok(Arc::new(caps)) } +fn validate_default_models(caps: 
&CodeAssistantCaps) -> Result<(), String> { + if !caps.defaults.chat_default_model.is_empty() { + if !caps + .chat_models + .contains_key(&caps.defaults.chat_default_model) + { + if resolve_model_caps(&caps.model_caps, &caps.defaults.chat_default_model).is_none() { + warn!( + "Default chat model '{}' is not in chat_models and not found in model capabilities registry", + caps.defaults.chat_default_model + ); + } + } + } + if !caps.defaults.chat_thinking_model.is_empty() { + if !caps + .chat_models + .contains_key(&caps.defaults.chat_thinking_model) + { + if resolve_model_caps(&caps.model_caps, &caps.defaults.chat_thinking_model).is_none() { + warn!( + "Default thinking model '{}' is not in chat_models and not found in model capabilities registry", + caps.defaults.chat_thinking_model + ); + } + } + } + if !caps.defaults.chat_light_model.is_empty() { + if !caps + .chat_models + .contains_key(&caps.defaults.chat_light_model) + { + if resolve_model_caps(&caps.model_caps, &caps.defaults.chat_light_model).is_none() { + warn!( + "Default light model '{}' is not in chat_models and not found in model capabilities registry", + caps.defaults.chat_light_model + ); + } + } + } + Ok(()) +} + pub fn resolve_relative_urls(provider: &mut CapsProvider, caps_url: &str) -> Result<(), String> { provider.chat_endpoint = relative_to_full_url(caps_url, &provider.chat_endpoint)?; provider.completion_endpoint = relative_to_full_url(caps_url, &provider.completion_endpoint)?; @@ -374,18 +1098,16 @@ pub fn strip_model_from_finetune(model: &str) -> String { model.split(":").next().unwrap().to_string() } -pub fn relative_to_full_url( - caps_url: &str, - maybe_relative_url: &str, -) -> Result { +pub fn relative_to_full_url(caps_url: &str, maybe_relative_url: &str) -> Result { if maybe_relative_url.starts_with("http") { Ok(maybe_relative_url.to_string()) } else if maybe_relative_url.is_empty() { Ok("".to_string()) } else { - let base_url = Url::parse(caps_url) - .map_err(|_| format!("failed to parse caps url: {}", caps_url))?; - let joined_url = base_url.join(maybe_relative_url) + let base_url = + Url::parse(caps_url).map_err(|_| format!("failed to parse caps url: {}", caps_url))?; + let joined_url = base_url + .join(maybe_relative_url) .map_err(|_| format!("failed to join url: {}", maybe_relative_url))?; Ok(joined_url.to_string()) } @@ -395,12 +1117,18 @@ pub fn resolve_model<'a, T>( models: &'a IndexMap>, model_id: &str, ) -> Result, String> { - models.get(model_id).or_else( - || models.get(&strip_model_from_finetune(model_id)) - ).cloned().ok_or(format!("Model '{}' not found. Server has the following models: {:?}", model_id, models.keys())) + models + .get(model_id) + .or_else(|| models.get(&strip_model_from_finetune(model_id))) + .cloned() + .ok_or(format!( + "Model '{}' not found. 
Server has the following models: {:?}", + model_id, + models.keys() + )) } -pub fn resolve_chat_model<'a>( +pub fn resolve_chat_model( caps: Arc, requested_model_id: &str, ) -> Result, String> { @@ -409,7 +1137,102 @@ pub fn resolve_chat_model<'a>( } else { &caps.defaults.chat_default_model }; - resolve_model(&caps.chat_models, model_id) + + let base_record = resolve_model(&caps.chat_models, model_id)?; + + let resolved = resolve_model_caps(&caps.model_caps, model_id); + + match resolved { + Some(resolved_caps) => { + tracing::debug!( + "Model '{}' resolved via {:?}, matched key: '{}'", + model_id, + resolved_caps.source, + resolved_caps.matched_key + ); + let mut effective = (*base_record).clone(); + apply_registry_caps_to_chat_model(&mut effective, &resolved_caps.caps); + Ok(Arc::new(effective)) + } + None => { + // Model not in registry (e.g., custom model) - use base_record as-is + // The base_record already has capabilities from build_chat_model_record + tracing::debug!( + "Model '{}' not in model_caps registry, using configured capabilities", + model_id + ); + Ok(base_record) + } + } +} + +fn apply_model_caps_to_all_chat_models(caps: &mut CodeAssistantCaps) { + let model_ids: Vec = caps.chat_models.keys().cloned().collect(); + for model_id in model_ids { + if let Some(resolved) = resolve_model_caps(&caps.model_caps, &model_id) { + if let Some(record) = caps.chat_models.get(&model_id) { + let mut updated = (**record).clone(); + apply_registry_caps_to_chat_model(&mut updated, &resolved.caps); + caps.chat_models.insert(model_id, Arc::new(updated)); + } + } + } +} + +fn apply_registry_caps_to_chat_model(record: &mut ChatModelRecord, caps: &ModelCapabilities) { + if record.base.user_configured { + if caps.n_ctx > 0 { + record.base.n_ctx = record.base.n_ctx.min(caps.n_ctx); + } + if caps.max_output_tokens > 0 { + record.max_output_tokens = record.max_output_tokens + .map(|v| v.min(caps.max_output_tokens)) + .or(Some(caps.max_output_tokens)); + } + if record.base.tokenizer.is_empty() && !caps.tokenizer.is_empty() { + record.base.tokenizer = caps.tokenizer.clone(); + } + if record.default_temperature.is_none() { + record.default_temperature = caps.default_temperature; + } + if record.default_max_tokens.is_none() { + record.default_max_tokens = caps.default_max_tokens; + } + record.base.supports_max_completion_tokens = caps.supports_max_completion_tokens; + return; + } + + if caps.n_ctx > 0 { + record.base.n_ctx = if record.base.n_ctx > 0 { + record.base.n_ctx.min(caps.n_ctx) + } else { + caps.n_ctx + }; + } + record.base.supports_max_completion_tokens = caps.supports_max_completion_tokens; + + record.supports_tools = caps.supports_tools; + record.supports_strict_tools = caps.supports_strict_tools; + record.supports_multimodality = caps.supports_vision; + record.supports_clicks = caps.supports_clicks; + record.default_temperature = caps.default_temperature; + record.default_max_tokens = caps.default_max_tokens; + if caps.max_output_tokens > 0 { + record.max_output_tokens = record.max_output_tokens + .map(|v| v.min(caps.max_output_tokens)) + .or(Some(caps.max_output_tokens)); + } + + if !caps.tokenizer.is_empty() { + record.base.tokenizer = caps.tokenizer.clone(); + } + + record.reasoning_effort_options = caps.reasoning_effort_options.clone(); + record.supports_thinking_budget = caps.supports_thinking_budget; + record.supports_adaptive_thinking_budget = caps.supports_adaptive_thinking_budget; + record.supports_agent = caps.supports_tools; + record.supports_temperature = 
caps.supports_temperature;
+    record.base.supports_web_search = caps.supports_web_search;
 }
 
 pub fn resolve_completion_model<'a>(
@@ -426,10 +1249,8 @@ pub fn resolve_completion_model<'a>(
     match resolve_model(&caps.completion_models, model_id) {
         Ok(model) => Ok(model),
         Err(first_err) if try_refact_fallbacks => {
-            if let Ok(model) = resolve_model(&caps.completion_models, &format!("refact/{model_id}")) {
-                return Ok(model);
-            }
-            if let Ok(model) = resolve_model(&caps.completion_models, &format!("refact_self_hosted/{model_id}")) {
+            if let Ok(model) = resolve_model(&caps.completion_models, &format!("refact/{model_id}"))
+            {
                 return Ok(model);
             }
             Err(first_err)
@@ -438,6 +1259,7 @@
     }
 }
 
+#[allow(dead_code)]
 pub fn is_cloud_model(model_id: &str) -> bool {
     model_id.starts_with("refact/")
 }
diff --git a/refact-agent/engine/src/caps/mod.rs b/refact-agent/engine/src/caps/mod.rs
index bc3e848db..e37fc43d7 100644
--- a/refact-agent/engine/src/caps/mod.rs
+++ b/refact-agent/engine/src/caps/mod.rs
@@ -1,5 +1,5 @@
 pub mod caps;
+pub mod model_caps;
 pub mod providers;
-pub mod self_hosted;
 
 pub use caps::*;
diff --git a/refact-agent/engine/src/caps/model_caps.rs b/refact-agent/engine/src/caps/model_caps.rs
new file mode 100644
index 000000000..dc05c8ba7
--- /dev/null
+++ b/refact-agent/engine/src/caps/model_caps.rs
@@ -0,0 +1,909 @@
+use std::collections::HashMap;
+use std::path::PathBuf;
+use std::sync::{Arc, OnceLock};
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::time::{Duration, SystemTime};
+
+use serde::{Deserialize, Serialize};
+use tokio::sync::{Mutex as AMutex, RwLock as ARwLock};
+use tracing::{info, warn};
+
+use crate::global_context::GlobalContext;
+
+static REFRESH_LOCK: OnceLock<AMutex<()>> = OnceLock::new();
+static FIRST_CALL: AtomicBool = AtomicBool::new(true);
+
+fn get_refresh_lock() -> &'static AMutex<()> {
+    REFRESH_LOCK.get_or_init(|| AMutex::new(()))
+}
+
+const SMALLCLOUD_MODEL_CAPS_URL: &str = "https://inference.smallcloud.ai/v1/model-capabilities";
+const CACHE_FILENAME: &str = "model-capabilities.json";
+const CACHE_MAX_AGE: Duration = Duration::from_secs(24 * 60 * 60);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum ModelCapsSource {
+    Registry,
+    Finetune,
+    Custom,
+}
+
+impl Default for ModelCapsSource {
+    fn default() -> Self {
+        Self::Registry
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct CanonicalNameParts {
+    pub original: String,
+    pub provider_stripped: String,
+    pub base_model: String,
+    pub is_finetune: bool,
+    pub last_segment: String,
+    pub last_segment_base: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct ResolvedCaps {
+    pub caps: ModelCapabilities,
+    pub source: ModelCapsSource,
+    pub matched_key: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+#[serde(rename_all = "snake_case")]
+pub enum CachingType {
+    None,
+    Auto,
+    Explicit,
+    Openai,
+}
+
+impl Default for CachingType {
+    fn default() -> Self {
+        Self::None
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct ModelCapabilities {
+    pub n_ctx: usize,
+    pub max_output_tokens: usize,
+    #[serde(default)]
+    pub supports_tools: bool,
+    #[serde(default)]
+    pub supports_strict_tools: bool,
+    #[serde(default)]
+    pub supports_vision: bool,
+    #[serde(default)]
+    pub supports_video: bool,
+    #[serde(default)]
+    pub supports_audio: bool,
+    #[serde(default)]
+    pub supports_pdf: bool,
+    #[serde(default)]
+    pub supports_clicks: bool,
+    #[serde(default = "default_true")]
+    pub supports_temperature: bool,
+    #[serde(default = "default_true")]
+    pub supports_streaming: bool,
+    #[serde(default)]
+    pub supports_max_completion_tokens: bool,
+    #[serde(default)]
+    pub reasoning_effort_options: Option<Vec<String>>,
+    #[serde(default)]
+    pub supports_thinking_budget: bool,
+    #[serde(default)]
+    pub supports_adaptive_thinking_budget: bool,
+    #[serde(default)]
+    pub supports_parallel_tools: bool,
+    #[serde(default)]
+    pub max_thinking_tokens: Option<usize>,
+    #[serde(default)]
+    pub caching: CachingType,
+    #[serde(default)]
+    pub tokenizer: String,
+    #[serde(default)]
+    pub default_temperature: Option<f32>,
+    #[serde(default)]
+    pub default_max_tokens: Option<usize>,
+    #[serde(default)]
+    pub supports_web_search: bool,
+}
+
+fn default_true() -> bool {
+    true
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CachedModelCaps {
+    pub fetched_at: u64,
+    pub models: HashMap<String, ModelCapabilities>,
+}
+
+impl CachedModelCaps {
+    pub fn is_expired(&self) -> bool {
+        let now = SystemTime::now()
+            .duration_since(SystemTime::UNIX_EPOCH)
+            .unwrap_or_default()
+            .as_secs();
+        now - self.fetched_at > CACHE_MAX_AGE.as_secs()
+    }
+}
+
+fn get_cache_path() -> PathBuf {
+    let cache_dir = dirs::cache_dir()
+        .unwrap_or_else(|| PathBuf::from("."))
+        .join("refact");
+    cache_dir.join(CACHE_FILENAME)
+}
+
+const MAX_REASONABLE_N_CTX: usize = 10_000_000;
+const MAX_REASONABLE_OUTPUT_TOKENS: usize = 1_000_000;
+
+fn normalize_tokenizer(tokenizer: &str) -> String {
+    if tokenizer.is_empty()
+        || tokenizer.starts_with("hf://")
+        || tokenizer.starts_with("http://")
+        || tokenizer.starts_with("https://")
+        || tokenizer.starts_with("file://")
+        || tokenizer.starts_with("fake")
+    {
+        return tokenizer.to_string();
+    }
+    if tokenizer.contains('/') {
+        return format!("hf://{}", tokenizer);
+    }
+    tokenizer.to_string()
+}
+
+fn validate_model_caps(caps: &mut HashMap<String, ModelCapabilities>) {
+    for (name, cap) in caps.iter_mut() {
+        if cap.n_ctx > MAX_REASONABLE_N_CTX {
+            warn!("Model {} has unreasonable n_ctx {}, clamping to {}", name, cap.n_ctx, MAX_REASONABLE_N_CTX);
+            cap.n_ctx = MAX_REASONABLE_N_CTX;
+        }
+        if cap.max_output_tokens > MAX_REASONABLE_OUTPUT_TOKENS {
+            warn!("Model {} has unreasonable max_output_tokens {}, clamping to {}", name, cap.max_output_tokens, MAX_REASONABLE_OUTPUT_TOKENS);
+            cap.max_output_tokens = MAX_REASONABLE_OUTPUT_TOKENS;
+        }
+        cap.tokenizer = normalize_tokenizer(&cap.tokenizer);
+    }
+}
+
+pub async fn load_cached_model_caps() -> Option<CachedModelCaps> {
+    let cache_path = get_cache_path();
+
+    match tokio::fs::read_to_string(&cache_path).await {
+        Ok(content) => match serde_json::from_str::<CachedModelCaps>(&content) {
+            Ok(mut cached) => {
+                validate_model_caps(&mut cached.models);
+                info!("Loaded model capabilities from cache: {} models", cached.models.len());
+                Some(cached)
+            }
+            Err(e) => {
+                warn!("Failed to parse cached model capabilities (treating as cache miss): {}", e);
+                None
+            }
+        },
+        Err(e) if e.kind() == std::io::ErrorKind::NotFound => None,
+        Err(e) => {
+            warn!("Failed to read cached model capabilities: {}", e);
+            None
+        }
+    }
+}
+
+pub async fn save_cached_model_caps(caps: &CachedModelCaps) -> Result<(), String> {
+    let cache_path = get_cache_path();
+
+    if let Some(parent) = cache_path.parent() {
+        tokio::fs::create_dir_all(parent).await
+            .map_err(|e| format!("Failed to create cache directory: {}", e))?;
+    }
+
+    let content = serde_json::to_string_pretty(caps)
+        .map_err(|e| format!("Failed to serialize model capabilities: {}", e))?;
+    tokio::fs::write(&cache_path, content).await
+        .map_err(|e| format!("Failed to write model capabilities cache: {}", e))?;
+    info!("Saved model capabilities to cache: {}", cache_path.display());
+    Ok(())
+}
+
+fn build_model_caps_url(address_url: &str) -> Result<String, String> {
+    let address_url = address_url.trim();
+    if address_url.is_empty() || address_url.eq_ignore_ascii_case("refact") {
+        return Ok(SMALLCLOUD_MODEL_CAPS_URL.to_string());
+    }
+
+    let base_url = url::Url::parse(address_url)
+        .map_err(|e| format!("Invalid address_url '{}': {}", address_url, e))?;
+    base_url
+        .join("v1/model-capabilities")
+        .map(|u| u.to_string())
+        .map_err(|e| format!("Failed to construct model-capabilities URL: {}", e))
+}
+
+pub async fn fetch_model_caps_from_server(
+    gcx: Arc<ARwLock<GlobalContext>>,
+    address_url: &str,
+) -> Result<HashMap<String, ModelCapabilities>, String> {
+    let http_client = gcx.read().await.http_client.clone();
+    let model_caps_url = build_model_caps_url(address_url)?;
+
+    info!("Fetching model capabilities from {}", model_caps_url);
+
+    let response = http_client
+        .get(&model_caps_url)
+        .timeout(Duration::from_secs(30))
+        .send()
+        .await
+        .map_err(|e| format!("Failed to fetch model capabilities: {}", e))?;
+
+    let status = response.status();
+    if !status.is_success() {
+        return Err(format!("Server returned status {}", status));
+    }
+
+    let models: HashMap<String, ModelCapabilities> = response
+        .json()
+        .await
+        .map_err(|e| format!("Failed to parse model capabilities response: {}", e))?;
+
+    info!("Fetched {} model capabilities from server", models.len());
+    Ok(models)
+}
+
+pub async fn get_model_caps(
+    gcx: Arc<ARwLock<GlobalContext>>,
+    address_url: &str,
+    force_refresh: bool,
+) -> Result<HashMap<String, ModelCapabilities>, String> {
+    let _refresh_guard = get_refresh_lock().lock().await;
+
+    let first_call = FIRST_CALL.swap(false, Ordering::SeqCst);
+    let should_refresh = force_refresh || first_call;
+
+    if !should_refresh {
+        if let Some(cached) = load_cached_model_caps().await {
+            if !cached.is_expired() {
+                return Ok(cached.models);
+            }
+            info!("Cached model capabilities expired, fetching fresh data");
+        }
+    } else if first_call {
+        info!("First model capabilities request, fetching fresh data");
+    }
+
+    match fetch_model_caps_from_server(gcx, address_url).await {
+        Ok(mut models) => {
+            validate_model_caps(&mut models);
+            let cached = CachedModelCaps {
+                fetched_at: SystemTime::now()
+                    .duration_since(SystemTime::UNIX_EPOCH)
+                    .unwrap_or_default()
+                    .as_secs(),
+                models: models.clone(),
+            };
+            if let Err(e) = save_cached_model_caps(&cached).await {
+                warn!("Failed to save model capabilities cache: {}", e);
+            }
+            Ok(models)
+        }
+        Err(e) => {
+            warn!("Failed to fetch model capabilities from server: {}", e);
+            if let Some(cached) = load_cached_model_caps().await {
+                warn!("Using expired cached model capabilities as fallback");
+                return Ok(cached.models);
+            }
+            Err(e)
+        }
+    }
+}
+
+pub fn is_model_supported(caps: &HashMap<String, ModelCapabilities>, model_name: &str) -> bool {
+    resolve_model_caps(caps, model_name).is_some()
+}
+
+pub fn canonicalize_model_name(model_id: &str) -> CanonicalNameParts {
+    let provider_stripped = if let Some(pos) = model_id.find('/') {
+        model_id[pos + 1..].to_string()
+    } else {
+        model_id.to_string()
+    };
+
+    let (base_model, is_finetune) = if let Some(colon_pos) = provider_stripped.find(':') {
+        let base = provider_stripped[..colon_pos].to_string();
+        let suffix = &provider_stripped[colon_pos + 1..];
+        let is_ft = suffix.starts_with("ft-") || suffix.starts_with("ft_");
+        (base, is_ft)
+    } else {
+        (provider_stripped.clone(), false)
+    };
+
+    let last_segment = model_id.split('/').last().unwrap_or(model_id).to_string();
+    let last_segment_base = if let Some(colon_pos) = last_segment.find(':')
{ + last_segment[..colon_pos].to_string() + } else { + last_segment.clone() + }; + + CanonicalNameParts { + original: model_id.to_string(), + provider_stripped, + base_model, + is_finetune, + last_segment, + last_segment_base, + } +} + +/// Known suffixes added by cloud providers that don't change model capabilities. +/// Stripping these allows matching e.g. "gemini-3-flash-preview" → "gemini-3-flash". +const IGNORABLE_SUFFIXES: &[&str] = &[ + "-latest", + "-preview", + "-cheap", + "-deep-research", + "-fp4", + "-fp8", + "-fp16", + "-int4", + "-int8", +]; + +/// Normalize a model name for fuzzy matching: +/// - lowercase +/// - strip known ignorable suffixes (repeatedly, to handle e.g. "-preview-cheap") +/// - replace '.' with '-' (e.g. "claude-opus-4.6" → "claude-opus-4-6") +fn normalize_model_name_for_matching(name: &str) -> String { + let mut result = name.to_lowercase(); + loop { + let mut changed = false; + for suffix in IGNORABLE_SUFFIXES { + if result.ends_with(suffix) { + result.truncate(result.len() - suffix.len()); + changed = true; + } + } + if !changed { + break; + } + } + result = result.replace('.', "-"); + result +} + +fn matches_pattern(pattern: &str, name: &str) -> bool { + if !pattern.contains('*') { + return pattern == name; + } + + if pattern.ends_with('*') { + let prefix = &pattern[..pattern.len() - 1]; + return name.starts_with(prefix); + } + + if pattern.starts_with('*') { + let suffix = &pattern[1..]; + return name.ends_with(suffix); + } + + if let Some(star_pos) = pattern.find('*') { + let prefix = &pattern[..star_pos]; + let suffix = &pattern[star_pos + 1..]; + return name.starts_with(prefix) && name.ends_with(suffix); + } + + false +} + +fn pattern_specificity(pattern: &str) -> usize { + pattern.chars().filter(|c| *c != '*').count() +} + +pub fn resolve_model_caps( + caps: &HashMap, + model_name: &str, +) -> Option { + let canonical = canonicalize_model_name(model_name); + + let names_to_try = [ + &canonical.original, + &canonical.provider_stripped, + &canonical.base_model, + &canonical.last_segment, + &canonical.last_segment_base, + ]; + + // Phase 1: Exact case-sensitive match + for name in &names_to_try { + if let Some(model_caps) = caps.get(*name) { + let source = if canonical.is_finetune && (*name == &canonical.base_model || *name == &canonical.last_segment_base) { + ModelCapsSource::Finetune + } else { + ModelCapsSource::Registry + }; + return Some(ResolvedCaps { + caps: model_caps.clone(), + source, + matched_key: (*name).clone(), + }); + } + } + + // Phase 2: Normalized matching (case-insensitive + suffix stripping + dot→dash) + let normalized_names: Vec = names_to_try.iter() + .map(|n| normalize_model_name_for_matching(n)) + .collect(); + + // Deduplicate normalized names while preserving order + let mut seen = std::collections::HashSet::new(); + let unique_normalized: Vec<&String> = normalized_names.iter() + .filter(|n| seen.insert(n.as_str().to_string())) + .collect(); + + for (key, model_caps) in caps.iter() { + if key.contains('*') { + continue; + } + let key_normalized = normalize_model_name_for_matching(key); + for norm_name in &unique_normalized { + if key_normalized == **norm_name { + let source = if canonical.is_finetune { + ModelCapsSource::Finetune + } else { + ModelCapsSource::Registry + }; + return Some(ResolvedCaps { + caps: model_caps.clone(), + source, + matched_key: key.clone(), + }); + } + } + } + + // Phase 3: Wildcard pattern matching (case-sensitive first) + let mut best_match: Option<(&str, &ModelCapabilities, usize)> = None; + + 
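+    // Illustration (editor's note, not part of the original file): if both "gpt-4*" and
+    // "gpt-4o*" are registered, a lookup for "gpt-4o-mini" matches both patterns;
+    // pattern_specificity() counts non-'*' characters (5 vs 6), so "gpt-4o*" wins, and
+    // equal-specificity ties below fall back to the lexicographically smaller pattern key.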
for (pattern, model_caps) in caps.iter() { + if !pattern.contains('*') { + continue; + } + + for name in &names_to_try { + if matches_pattern(pattern, name) { + let specificity = pattern_specificity(pattern); + if best_match.is_none() || specificity > best_match.unwrap().2 { + best_match = Some((pattern, model_caps, specificity)); + } else if specificity == best_match.unwrap().2 && pattern.as_str() < best_match.unwrap().0 { + best_match = Some((pattern, model_caps, specificity)); + } + } + } + } + + // Phase 4: Wildcard pattern matching with normalized names + if best_match.is_none() { + for (pattern, model_caps) in caps.iter() { + if !pattern.contains('*') { + continue; + } + let pattern_normalized = normalize_model_name_for_matching(pattern); + for norm_name in &unique_normalized { + if matches_pattern(&pattern_normalized, norm_name) { + let specificity = pattern_specificity(&pattern_normalized); + if best_match.is_none() || specificity > best_match.unwrap().2 { + best_match = Some((pattern, model_caps, specificity)); + } else if specificity == best_match.unwrap().2 && pattern.as_str() < best_match.unwrap().0 { + best_match = Some((pattern, model_caps, specificity)); + } + } + } + } + } + + best_match.map(|(matched_key, model_caps, _)| { + let source = if canonical.is_finetune { + ModelCapsSource::Finetune + } else { + ModelCapsSource::Registry + }; + ResolvedCaps { + caps: model_caps.clone(), + source, + matched_key: matched_key.to_string(), + } + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_model_capability_lookup() { + let mut caps = HashMap::new(); + caps.insert("gpt-4o".to_string(), ModelCapabilities { + n_ctx: 128000, + max_output_tokens: 16384, + supports_tools: true, + supports_vision: true, + ..Default::default() + }); + caps.insert("claude-3-5-sonnet".to_string(), ModelCapabilities { + n_ctx: 200000, + max_output_tokens: 8192, + supports_tools: true, + supports_vision: true, + supports_pdf: true, + ..Default::default() + }); + + assert!(resolve_model_caps(&caps, "gpt-4o").is_some()); + assert!(resolve_model_caps(&caps, "openai/gpt-4o").is_some()); + assert!(resolve_model_caps(&caps, "gpt-4o:v2").is_some()); + assert!(resolve_model_caps(&caps, "claude-3-5-sonnet").is_some()); + assert!(resolve_model_caps(&caps, "unknown-model").is_none()); + } + + #[test] + fn test_canonicalize_model_name() { + let parts = canonicalize_model_name("openai/gpt-4o"); + assert_eq!(parts.provider_stripped, "gpt-4o"); + assert_eq!(parts.base_model, "gpt-4o"); + assert_eq!(parts.last_segment, "gpt-4o"); + assert!(!parts.is_finetune); + + let parts = canonicalize_model_name("gpt-4o:ft-abc123"); + assert_eq!(parts.provider_stripped, "gpt-4o:ft-abc123"); + assert_eq!(parts.base_model, "gpt-4o"); + assert!(parts.is_finetune); + + let parts = canonicalize_model_name("anthropic/claude-3-5-sonnet:ft-xyz"); + assert_eq!(parts.provider_stripped, "claude-3-5-sonnet:ft-xyz"); + assert_eq!(parts.base_model, "claude-3-5-sonnet"); + assert!(parts.is_finetune); + + let parts = canonicalize_model_name("openrouter/anthropic/claude-3.7-sonnet"); + assert_eq!(parts.provider_stripped, "anthropic/claude-3.7-sonnet"); + assert_eq!(parts.base_model, "anthropic/claude-3.7-sonnet"); + assert_eq!(parts.last_segment, "claude-3.7-sonnet"); + assert_eq!(parts.last_segment_base, "claude-3.7-sonnet"); + assert!(!parts.is_finetune); + + let parts = canonicalize_model_name("models/gemini-2.0-flash"); + assert_eq!(parts.provider_stripped, "gemini-2.0-flash"); + assert_eq!(parts.last_segment, 
"gemini-2.0-flash"); + } + + #[test] + fn test_pattern_matching() { + let mut caps = HashMap::new(); + caps.insert("claude-3-7-sonnet*".to_string(), ModelCapabilities { + n_ctx: 200000, + max_output_tokens: 16384, + supports_tools: true, + ..Default::default() + }); + caps.insert("gpt-4*".to_string(), ModelCapabilities { + n_ctx: 128000, + max_output_tokens: 8192, + supports_tools: true, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "claude-3-7-sonnet-latest").unwrap(); + assert_eq!(resolved.matched_key, "claude-3-7-sonnet*"); + assert_eq!(resolved.caps.n_ctx, 200000); + + let resolved = resolve_model_caps(&caps, "gpt-4o").unwrap(); + assert_eq!(resolved.matched_key, "gpt-4*"); + } + + #[test] + fn test_finetune_source() { + let mut caps = HashMap::new(); + caps.insert("gpt-4o".to_string(), ModelCapabilities { + n_ctx: 128000, + max_output_tokens: 16384, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "gpt-4o:ft-abc123").unwrap(); + assert_eq!(resolved.source, ModelCapsSource::Finetune); + assert_eq!(resolved.matched_key, "gpt-4o"); + } + + #[test] + fn test_reasoning_effort_options_serde() { + let caps = ModelCapabilities { + n_ctx: 128000, + max_output_tokens: 16384, + reasoning_effort_options: Some(vec!["low".to_string(), "medium".to_string(), "high".to_string()]), + supports_thinking_budget: true, + supports_adaptive_thinking_budget: false, + ..Default::default() + }; + let json = serde_json::to_string(&caps).unwrap(); + assert!(json.contains("\"reasoning_effort_options\":[\"low\",\"medium\",\"high\"]")); + assert!(json.contains("\"supports_thinking_budget\":true")); + assert!(json.contains("\"supports_adaptive_thinking_budget\":false")); + + let parsed: ModelCapabilities = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed.reasoning_effort_options, Some(vec!["low".to_string(), "medium".to_string(), "high".to_string()])); + assert!(parsed.supports_thinking_budget); + assert!(!parsed.supports_adaptive_thinking_budget); + } + + #[test] + fn test_caching_type_serde() { + let json = serde_json::to_string(&CachingType::Explicit).unwrap(); + assert_eq!(json, "\"explicit\""); + + let parsed: CachingType = serde_json::from_str("\"auto\"").unwrap(); + assert_eq!(parsed, CachingType::Auto); + } + + #[test] + fn test_multi_slash_openrouter_models() { + let mut caps = HashMap::new(); + caps.insert("claude-3.7-sonnet".to_string(), ModelCapabilities { + n_ctx: 200000, + max_output_tokens: 16384, + supports_tools: true, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "openrouter/anthropic/claude-3.7-sonnet"); + assert!(resolved.is_some()); + let resolved = resolved.unwrap(); + assert_eq!(resolved.matched_key, "claude-3.7-sonnet"); + assert_eq!(resolved.caps.n_ctx, 200000); + } + + #[test] + fn test_gemini_models_prefix() { + let mut caps = HashMap::new(); + caps.insert("gemini-2.0-flash".to_string(), ModelCapabilities { + n_ctx: 1000000, + max_output_tokens: 8192, + supports_tools: true, + supports_vision: true, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "models/gemini-2.0-flash"); + assert!(resolved.is_some()); + assert_eq!(resolved.unwrap().matched_key, "gemini-2.0-flash"); + } + + #[test] + fn test_capability_fields_completeness() { + let caps = ModelCapabilities { + n_ctx: 128000, + max_output_tokens: 16384, + supports_tools: true, + supports_strict_tools: true, + supports_vision: true, + supports_max_completion_tokens: true, + reasoning_effort_options: 
Some(vec!["low".to_string(), "medium".to_string(), "high".to_string()]), + supports_thinking_budget: true, + supports_temperature: false, + ..Default::default() + }; + + assert!(caps.supports_strict_tools); + assert!(caps.supports_max_completion_tokens); + assert!(!caps.supports_temperature); + assert_eq!(caps.reasoning_effort_options, Some(vec!["low".to_string(), "medium".to_string(), "high".to_string()])); + assert!(caps.supports_thinking_budget); + } + + #[test] + fn test_validation_clamps_values() { + let mut caps = HashMap::new(); + caps.insert("test-model".to_string(), ModelCapabilities { + n_ctx: 999_999_999, + max_output_tokens: 999_999_999, + ..Default::default() + }); + + validate_model_caps(&mut caps); + + let model = caps.get("test-model").unwrap(); + assert_eq!(model.n_ctx, MAX_REASONABLE_N_CTX); + assert_eq!(model.max_output_tokens, MAX_REASONABLE_OUTPUT_TOKENS); + } + + #[test] + fn test_pattern_specificity_tiebreaking() { + let mut caps = HashMap::new(); + caps.insert("gpt-*".to_string(), ModelCapabilities { + n_ctx: 100000, + ..Default::default() + }); + caps.insert("gpt-4*".to_string(), ModelCapabilities { + n_ctx: 128000, + ..Default::default() + }); + caps.insert("gpt-4o*".to_string(), ModelCapabilities { + n_ctx: 200000, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "gpt-4o-mini").unwrap(); + assert_eq!(resolved.matched_key, "gpt-4o*"); + assert_eq!(resolved.caps.n_ctx, 200000); + } + + #[test] + fn test_exact_match_over_pattern() { + let mut caps = HashMap::new(); + caps.insert("gpt-4o".to_string(), ModelCapabilities { + n_ctx: 128000, + ..Default::default() + }); + caps.insert("gpt-4*".to_string(), ModelCapabilities { + n_ctx: 100000, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "gpt-4o").unwrap(); + assert_eq!(resolved.matched_key, "gpt-4o"); + assert_eq!(resolved.caps.n_ctx, 128000); + } + + #[test] + fn test_normalize_tokenizer() { + assert_eq!(normalize_tokenizer(""), ""); + assert_eq!(normalize_tokenizer("hf://Xenova/claude-tokenizer"), "hf://Xenova/claude-tokenizer"); + assert_eq!(normalize_tokenizer("http://example.com/tokenizer.json"), "http://example.com/tokenizer.json"); + assert_eq!(normalize_tokenizer("https://example.com/tokenizer.json"), "https://example.com/tokenizer.json"); + assert_eq!(normalize_tokenizer("file:///path/to/tokenizer.json"), "file:///path/to/tokenizer.json"); + assert_eq!(normalize_tokenizer("fake"), "fake"); + assert_eq!(normalize_tokenizer("fake-tokenizer"), "fake-tokenizer"); + assert_eq!(normalize_tokenizer("Xenova/claude-tokenizer"), "hf://Xenova/claude-tokenizer"); + assert_eq!(normalize_tokenizer("meta-llama/Llama-3.3-70B"), "hf://meta-llama/Llama-3.3-70B"); + assert_eq!(normalize_tokenizer("deepseek-ai/DeepSeek-V3"), "hf://deepseek-ai/DeepSeek-V3"); + assert_eq!(normalize_tokenizer("local-tokenizer"), "local-tokenizer"); + } + + #[test] + fn test_validate_normalizes_tokenizer() { + let mut caps = HashMap::new(); + caps.insert("test-model".to_string(), ModelCapabilities { + n_ctx: 128000, + max_output_tokens: 16384, + tokenizer: "Xenova/claude-tokenizer".to_string(), + ..Default::default() + }); + + validate_model_caps(&mut caps); + + let model = caps.get("test-model").unwrap(); + assert_eq!(model.tokenizer, "hf://Xenova/claude-tokenizer"); + } + + #[test] + fn test_normalize_model_name_for_matching() { + assert_eq!(normalize_model_name_for_matching("claude-3-7-sonnet-latest"), "claude-3-7-sonnet"); + 
assert_eq!(normalize_model_name_for_matching("gemini-3-pro-preview-cheap"), "gemini-3-pro"); + assert_eq!(normalize_model_name_for_matching("o4-mini-deep-research"), "o4-mini"); + assert_eq!(normalize_model_name_for_matching("claude-opus-4.6"), "claude-opus-4-6"); + assert_eq!(normalize_model_name_for_matching("Kimi-K2-Instruct"), "kimi-k2-instruct"); + assert_eq!(normalize_model_name_for_matching("MiniMax-M2.1"), "minimax-m2-1"); + assert_eq!(normalize_model_name_for_matching("llama-3-70b-fp8"), "llama-3-70b"); + assert_eq!(normalize_model_name_for_matching("gpt-4o"), "gpt-4o"); + } + + #[test] + fn test_case_insensitive_matching() { + let mut caps = HashMap::new(); + caps.insert("kimi-k2-instruct".to_string(), ModelCapabilities { + n_ctx: 131000, + max_output_tokens: 32768, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "Kimi-K2-Instruct"); + assert!(resolved.is_some()); + assert_eq!(resolved.unwrap().matched_key, "kimi-k2-instruct"); + } + + #[test] + fn test_suffix_stripping_latest() { + let mut caps = HashMap::new(); + caps.insert("claude-3-7-sonnet".to_string(), ModelCapabilities { + n_ctx: 200000, + max_output_tokens: 16384, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "claude-3-7-sonnet-latest"); + assert!(resolved.is_some()); + assert_eq!(resolved.unwrap().matched_key, "claude-3-7-sonnet"); + } + + #[test] + fn test_suffix_stripping_compound() { + let mut caps = HashMap::new(); + caps.insert("gemini-3-pro".to_string(), ModelCapabilities { + n_ctx: 1000000, + max_output_tokens: 64000, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "gemini-3-pro-preview-cheap"); + assert!(resolved.is_some()); + assert_eq!(resolved.unwrap().matched_key, "gemini-3-pro"); + } + + #[test] + fn test_dot_to_dash_normalization() { + let mut caps = HashMap::new(); + caps.insert("claude-opus-4-6".to_string(), ModelCapabilities { + n_ctx: 200000, + max_output_tokens: 128000, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "claude-opus-4.6"); + assert!(resolved.is_some()); + assert_eq!(resolved.unwrap().matched_key, "claude-opus-4-6"); + } + + #[test] + fn test_exact_match_preferred_over_normalized() { + let mut caps = HashMap::new(); + caps.insert("gpt-4o".to_string(), ModelCapabilities { + n_ctx: 128000, + max_output_tokens: 16384, + ..Default::default() + }); + caps.insert("gpt-4o-latest".to_string(), ModelCapabilities { + n_ctx: 200000, + max_output_tokens: 32768, + ..Default::default() + }); + + // Exact match should win over suffix-stripped + let resolved = resolve_model_caps(&caps, "gpt-4o-latest").unwrap(); + assert_eq!(resolved.matched_key, "gpt-4o-latest"); + assert_eq!(resolved.caps.n_ctx, 200000); + } + + #[test] + fn test_fp_suffix_stripping() { + let mut caps = HashMap::new(); + caps.insert("llama-3-70b".to_string(), ModelCapabilities { + n_ctx: 128000, + max_output_tokens: 8192, + ..Default::default() + }); + + let resolved = resolve_model_caps(&caps, "llama-3-70b-fp8"); + assert!(resolved.is_some()); + assert_eq!(resolved.unwrap().matched_key, "llama-3-70b"); + } + + #[test] + fn test_provider_prefix_with_case_mismatch() { + let mut caps = HashMap::new(); + caps.insert("minimax-m2.1".to_string(), ModelCapabilities { + n_ctx: 196000, + max_output_tokens: 16384, + ..Default::default() + }); + + // Both "refact/MiniMax-M2.1" and "MiniMax-M2.1" should resolve + let resolved = resolve_model_caps(&caps, "refact/MiniMax-M2.1"); + assert!(resolved.is_some()); + + let resolved = 
resolve_model_caps(&caps, "MiniMax-M2.1");
+        assert!(resolved.is_some());
+    }
+}
diff --git a/refact-agent/engine/src/caps/providers.rs b/refact-agent/engine/src/caps/providers.rs
index 881a243a2..2401fba40 100644
--- a/refact-agent/engine/src/caps/providers.rs
+++ b/refact-agent/engine/src/caps/providers.rs
@@ -3,17 +3,15 @@
 use std::sync::{Arc, OnceLock};
 
 use indexmap::IndexMap;
 use serde::{Deserialize, Serialize};
-use tokio::sync::RwLock as ARwLock;
-use structopt::StructOpt;
 
 use crate::caps::{
     BaseModelRecord, ChatModelRecord, CodeAssistantCaps, CompletionModelRecord, DefaultModels,
     EmbeddingModelRecord, HasBaseModelRecord, default_embedding_batch, default_rejection_threshold,
-    load_caps_value_from_url, resolve_relative_urls, strip_model_from_finetune, normalize_string
+    strip_model_from_finetune, normalize_string,
 };
 use crate::custom_error::{MapErrToString, YamlError};
-use crate::global_context::{CommandLine, GlobalContext};
-use crate::caps::self_hosted::SelfHostedCaps;
+
+use crate::llm::adapter::WireFormat;
 
 #[derive(Debug, Serialize, Deserialize, Clone, Default)]
 pub struct CapsProvider {
@@ -24,10 +22,13 @@ pub struct CapsProvider {
     #[serde(default = "default_true")]
     pub supports_completion: bool,
 
+    #[serde(default)]
+    pub wire_format: WireFormat,
+
     #[serde(default = "default_endpoint_style")]
     pub endpoint_style: String,
 
-    // This aliases are for backward compatibility with cloud and self-hosted caps
+    // These aliases are for backward compatibility with cloud and self-hosted caps
     #[serde(default, alias = "endpoint_template")]
     pub completion_endpoint: String,
     #[serde(default, alias = "endpoint_chat_passthrough")]
     pub chat_endpoint: String,
@@ -41,6 +42,9 @@ pub struct CapsProvider {
     #[serde(default)]
     pub tokenizer_api_key: String,
 
+    #[serde(default)]
+    pub extra_headers: std::collections::HashMap<String, String>,
+
     #[serde(default)]
     pub code_completion_n_ctx: usize,
 
@@ -67,26 +71,51 @@ pub struct CapsProvider {
 impl CapsProvider {
     pub fn apply_override(&mut self, value: serde_yaml::Value) -> Result<(), String> {
         set_field_if_exists::<bool>(&mut self.enabled, "enabled", &value)?;
+        set_field_if_exists::<WireFormat>(&mut self.wire_format, "wire_format", &value)?;
         set_field_if_exists::<String>(&mut self.endpoint_style, "endpoint_style", &value)?;
-        set_field_if_exists::<String>(&mut self.completion_endpoint, "completion_endpoint", &value)?;
+        set_field_if_exists::<String>(
+            &mut self.completion_endpoint,
+            "completion_endpoint",
+            &value,
+        )?;
         set_field_if_exists::<String>(&mut self.chat_endpoint, "chat_endpoint", &value)?;
         set_field_if_exists::<String>(&mut self.embedding_endpoint, "embedding_endpoint", &value)?;
         set_field_if_exists::<String>(&mut self.api_key, "api_key", &value)?;
         set_field_if_exists::<String>(&mut self.tokenizer_api_key, "tokenizer_api_key", &value)?;
-        set_field_if_exists::<EmbeddingModelRecord>(&mut self.embedding_model, "embedding_model", &value)?;
+        set_field_if_exists::<EmbeddingModelRecord>(
+            &mut self.embedding_model,
+            "embedding_model",
+            &value,
+        )?;
         if value.get("embedding_model").is_some() {
             self.embedding_model.base.removable = true;
             self.embedding_model.base.user_configured = true;
         }
-        extend_model_collection::<ChatModelRecord>(&mut self.chat_models, "chat_models", &value, &self.running_models)?;
-        extend_model_collection::<CompletionModelRecord>(&mut self.completion_models, "completion_models", &value, &self.running_models)?;
+        // New provider system writes `enabled_models` when user toggles models via UI.
+        // If present, replace template running_models with user's explicit selection.
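+        // Illustration (editor's sketch, not part of this change): a user override in
+        // providers.d/<name>.yaml that takes this branch might look roughly like
+        //     enabled_models:
+        //       - gpt-4o
+        //       - gpt-4o-mini
+        // The model names above are placeholders; only the `enabled_models` key is what this code reads.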
+        if value.get("enabled_models").is_some() {
+            self.running_models.clear();
+            extend_collection::<Vec<String>>(&mut self.running_models, "enabled_models", &value)?;
+        }
         extend_collection::<Vec<String>>(&mut self.running_models, "running_models", &value)?;
+        extend_model_collection::<ChatModelRecord>(
+            &mut self.chat_models,
+            "chat_models",
+            &value,
+            &self.running_models,
+        )?;
+        extend_model_collection::<CompletionModelRecord>(
+            &mut self.completion_models,
+            "completion_models",
+            &value,
+            &self.running_models,
+        )?;
 
         match serde_yaml::from_value::<DefaultModels>(value) {
             Ok(default_models) => {
                 self.defaults.apply_override(&default_models, None);
-            },
+            }
             Err(e) => return Err(e.to_string()),
         }
@@ -95,7 +124,9 @@
 }
 
 fn set_field_if_exists<T: for<'de> serde::Deserialize<'de>>(
-    target: &mut T, field: &str, value: &serde_yaml::Value
+    target: &mut T,
+    field: &str,
+    value: &serde_yaml::Value,
 ) -> Result<(), String> {
     if let Some(val) = value.get(field) {
         *target = serde_yaml::from_value(val.clone())
@@ -105,7 +136,9 @@ fn set_field_if_exists<T: for<'de> serde::Deserialize<'de>>(
 }
 
 fn extend_collection<C: for<'de> serde::Deserialize<'de> + Extend<C::Item> + IntoIterator>(
-    target: &mut C, field: &str, value: &serde_yaml::Value
+    target: &mut C,
+    field: &str,
+    value: &serde_yaml::Value,
 ) -> Result<(), String> {
     if let Some(value) = value.get(field) {
         let imported_collection = serde_yaml::from_value::<C>(value.clone())
@@ -119,7 +152,10 @@
 }
 
 // Special implementation for ChatModelRecord and CompletionModelRecord collections
 // that sets removable=true for newly added models
 fn extend_model_collection<T: for<'de> serde::Deserialize<'de> + HasBaseModelRecord>(
-    target: &mut IndexMap<String, T>, field: &str, value: &serde_yaml::Value, prev_running_models: &Vec<String>
+    target: &mut IndexMap<String, T>,
+    field: &str,
+    value: &serde_yaml::Value,
+    prev_running_models: &Vec<String>,
 ) -> Result<(), String> {
     if let Some(value) = value.get(field) {
         let imported_collection = serde_yaml::from_value::<IndexMap<String, T>>(value.clone())
@@ -136,13 +172,16 @@
     Ok(())
 }
 
-fn default_endpoint_style() -> String { "openai".to_string() }
+fn default_endpoint_style() -> String {
+    "openai".to_string()
+}
 
-fn default_true() -> bool { true }
+fn default_true() -> bool {
+    true
+}
 
 impl<'de> serde::Deserialize<'de> for EmbeddingModelRecord {
-    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error>
-    {
+    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
         #[derive(Deserialize)]
         #[serde(untagged)]
         enum Input {
@@ -164,7 +203,10 @@ impl<'de> serde::Deserialize<'de> for EmbeddingModelRecord {
         match Input::deserialize(deserializer)?
{ Input::String(name) => Ok(EmbeddingModelRecord { - base: BaseModelRecord { name, ..Default::default() }, + base: BaseModelRecord { + name, + ..Default::default() + }, ..Default::default() }), Input::Full(mut helper) => { @@ -179,35 +221,72 @@ impl<'de> serde::Deserialize<'de> for EmbeddingModelRecord { rejection_threshold: helper.rejection_threshold, embedding_size: helper.embedding_size, }) - }, + } } } } -#[derive(Deserialize, Default, Debug)] -pub struct ModelDefaultSettingsUI { - #[serde(default)] - pub chat: ChatModelRecord, - #[serde(default)] - pub completion: CompletionModelRecord, - #[serde(default)] - pub embedding: EmbeddingModelRecord, -} + const PROVIDER_TEMPLATES: &[(&str, &str)] = &[ - ("anthropic", include_str!("../yaml_configs/default_providers/anthropic.yaml")), - ("custom", include_str!("../yaml_configs/default_providers/custom.yaml")), - ("deepseek", include_str!("../yaml_configs/default_providers/deepseek.yaml")), - ("google_gemini", include_str!("../yaml_configs/default_providers/google_gemini.yaml")), - ("groq", include_str!("../yaml_configs/default_providers/groq.yaml")), - ("lmstudio", include_str!("../yaml_configs/default_providers/lmstudio.yaml")), - ("ollama", include_str!("../yaml_configs/default_providers/ollama.yaml")), - ("openai", include_str!("../yaml_configs/default_providers/openai.yaml")), - ("openrouter", include_str!("../yaml_configs/default_providers/openrouter.yaml")), - ("xai", include_str!("../yaml_configs/default_providers/xai.yaml")), + ( + "anthropic", + include_str!("../yaml_configs/default_providers/anthropic.yaml"), + ), + ( + "custom", + include_str!("../yaml_configs/default_providers/custom.yaml"), + ), + ( + "deepseek", + include_str!("../yaml_configs/default_providers/deepseek.yaml"), + ), + ( + "google_gemini", + include_str!("../yaml_configs/default_providers/google_gemini.yaml"), + ), + ( + "groq", + include_str!("../yaml_configs/default_providers/groq.yaml"), + ), + ( + "lmstudio", + include_str!("../yaml_configs/default_providers/lmstudio.yaml"), + ), + ( + "ollama", + include_str!("../yaml_configs/default_providers/ollama.yaml"), + ), + ( + "openai", + include_str!("../yaml_configs/default_providers/openai.yaml"), + ), + ( + "openai_responses", + include_str!("../yaml_configs/default_providers/openai_responses.yaml"), + ), + ( + "openrouter", + include_str!("../yaml_configs/default_providers/openrouter.yaml"), + ), + ( + "refact", + include_str!("../yaml_configs/default_providers/refact.yaml"), + ), + ( + "vllm", + include_str!("../yaml_configs/default_providers/vllm.yaml"), + ), + ( + "xai", + include_str!("../yaml_configs/default_providers/xai.yaml"), + ), + ( + "xai_responses", + include_str!("../yaml_configs/default_providers/xai_responses.yaml"), + ), ]; static PARSED_PROVIDERS: OnceLock> = OnceLock::new(); -static PARSED_MODEL_DEFAULTS: OnceLock> = OnceLock::new(); pub fn get_provider_templates() -> &'static IndexMap { PARSED_PROVIDERS.get_or_init(|| { @@ -224,24 +303,6 @@ pub fn get_provider_templates() -> &'static IndexMap { }) } -pub fn get_provider_model_default_settings_ui() -> &'static IndexMap { - PARSED_MODEL_DEFAULTS.get_or_init(|| { - let mut map = IndexMap::new(); - for (name, yaml) in PROVIDER_TEMPLATES { - let yaml_value = serde_yaml::from_str::(yaml) - .unwrap_or_else(|_| panic!("Failed to parse YAML for provider {}", name)); - - let model_default_settings_ui_value = yaml_value.get("model_default_settings_ui").cloned() - .expect(&format!("Missing `model_model_default_settings_ui` for provider template 
{name}")); - let model_default_settings_ui = serde_yaml::from_value(model_default_settings_ui_value) - .unwrap_or_else(|e| panic!("Failed to parse model_defaults for provider {}: {}", name, e)); - - map.insert(name.to_string(), model_default_settings_ui); - } - map - }) -} - /// Returns yaml files from providers.d directory, and list of errors from reading /// directory or listing files pub async fn get_provider_yaml_paths(config_dir: &Path) -> (Vec, Vec) { @@ -262,17 +323,22 @@ pub async fn get_provider_yaml_paths(config_dir: &Path) -> (Vec, Vec { let path = entry.path(); - if path.is_file() && - path.extension().map_or(false, |ext| ext == "yaml" || ext == "yml") { + if path.is_file() + && path + .extension() + .map_or(false, |ext| ext == "yaml" || ext == "yml") + { yaml_paths.push(path); } - }, + } Err(e) => { errors.push(format!("Error reading directory entry: {e}")); } } } + yaml_paths.sort(); + (yaml_paths, errors) } @@ -287,7 +353,9 @@ pub fn post_process_provider( add_name_and_id_to_model_records(provider); if !include_disabled_models { provider.chat_models.retain(|_, model| model.base.enabled); - provider.completion_models.retain(|_, model| model.base.enabled); + provider + .completion_models + .retain(|_, model| model.base.enabled); } } @@ -310,6 +378,7 @@ pub async fn read_providers_d( } let provider_templates = get_provider_templates(); + let mut seen_provider_names = std::collections::HashSet::new(); for yaml_path in yaml_paths { let provider_name = match yaml_path.file_stem() { @@ -317,11 +386,31 @@ pub async fn read_providers_d( None => continue, }; + if !seen_provider_names.insert(provider_name.clone()) { + error_log.push(YamlError { + path: yaml_path.to_string_lossy().to_string(), + error_line: 0, + error_msg: format!( + "Duplicate provider name '{}' (another file with the same stem was already processed)", + provider_name + ), + }); + continue; + } + if provider_templates.contains_key(&provider_name) { - match get_provider_from_template_and_config_file(config_dir, &provider_name, false, false, experimental).await { + match get_provider_from_template_and_config_file( + config_dir, + &provider_name, + false, + false, + experimental, + ) + .await + { Ok(provider) => { providers.push(provider); - }, + } Err(e) => { error_log.push(YamlError { path: yaml_path.to_string_lossy().to_string(), @@ -396,52 +485,73 @@ pub async fn get_latest_provider_mtime(config_dir: &Path) -> Option { _ => latest_mtime, }; } - }, + } Err(e) => { tracing::error!("Failed to get metadata for {}: {}", path.display(), e); } } } - latest_mtime.map(|mtime| mtime.duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()) + latest_mtime.map(|mtime| { + mtime + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() + }) } pub fn add_models_to_caps(caps: &mut CodeAssistantCaps, providers: Vec) { - fn add_provider_details_to_model(base_model_rec: &mut BaseModelRecord, provider: &CapsProvider, model_name: &str, endpoint: &str) { + fn add_provider_details_to_model( + base_model_rec: &mut BaseModelRecord, + provider: &CapsProvider, + model_name: &str, + endpoint: &str, + ) { base_model_rec.api_key = provider.api_key.clone(); base_model_rec.tokenizer_api_key = provider.tokenizer_api_key.clone(); base_model_rec.endpoint = endpoint.replace("$MODEL", model_name); base_model_rec.support_metadata = provider.support_metadata; base_model_rec.endpoint_style = provider.endpoint_style.clone(); + base_model_rec.wire_format = provider.wire_format; + base_model_rec.extra_headers = provider.extra_headers.clone(); } for 
mut provider in providers { - let completion_models = std::mem::take(&mut provider.completion_models); for (model_name, mut model_rec) in completion_models { if model_rec.base.endpoint.is_empty() { add_provider_details_to_model( - &mut model_rec.base, &provider, &model_name, &provider.completion_endpoint + &mut model_rec.base, + &provider, + &model_name, + &provider.completion_endpoint, ); - if provider.code_completion_n_ctx > 0 && provider.code_completion_n_ctx < model_rec.base.n_ctx { + if provider.code_completion_n_ctx > 0 + && provider.code_completion_n_ctx < model_rec.base.n_ctx + { // model is capable of more, but we may limit it from server or provider, e.x. for latency model_rec.base.n_ctx = provider.code_completion_n_ctx; } } - caps.completion_models.insert(model_rec.base.id.clone(), Arc::new(model_rec)); + caps.completion_models + .insert(model_rec.base.id.clone(), Arc::new(model_rec)); } let chat_models = std::mem::take(&mut provider.chat_models); for (model_name, mut model_rec) in chat_models { if model_rec.base.endpoint.is_empty() { add_provider_details_to_model( - &mut model_rec.base, &provider, &model_name, &provider.chat_endpoint + &mut model_rec.base, + &provider, + &model_name, + &provider.chat_endpoint, ); } - caps.chat_models.insert(model_rec.base.id.clone(), Arc::new(model_rec)); + caps.chat_models + .insert(model_rec.base.id.clone(), Arc::new(model_rec)); } if provider.embedding_model.is_configured() && provider.embedding_model.base.enabled { @@ -450,13 +560,17 @@ pub fn add_models_to_caps(caps: &mut CodeAssistantCaps, providers: Vec, - pub chat_models: IndexMap, +} + +#[derive(Deserialize)] +pub struct EmbeddingPresets { pub embedding_models: IndexMap, } -const UNPARSED_KNOWN_MODELS: &'static str = include_str!("../known_models.json"); -static KNOWN_MODELS: OnceLock = OnceLock::new(); - -pub fn get_known_models() -> &'static KnownModels { - KNOWN_MODELS.get_or_init(|| { - serde_json::from_str::(UNPARSED_KNOWN_MODELS).map_err(|e| { - let up_to_line = UNPARSED_KNOWN_MODELS.lines().take(e.line()).collect::>().join("\n"); - panic!("{}\nfailed to parse KNOWN_MODELS: {}", up_to_line, e); - }).unwrap() + +const UNPARSED_COMPLETION_PRESETS: &str = include_str!("../completion_presets.json"); +const UNPARSED_EMBEDDING_PRESETS: &str = include_str!("../embedding_presets.json"); + +static COMPLETION_PRESETS: OnceLock = OnceLock::new(); +static EMBEDDING_PRESETS: OnceLock = OnceLock::new(); + +pub fn get_completion_presets() -> &'static CompletionPresets { + COMPLETION_PRESETS.get_or_init(|| { + serde_json::from_str::(UNPARSED_COMPLETION_PRESETS) + .unwrap_or_else(|e| { + let up_to_line = UNPARSED_COMPLETION_PRESETS + .lines() + .take(e.line()) + .collect::>() + .join("\n"); + panic!("{}\nfailed to parse COMPLETION_PRESETS: {}", up_to_line, e); + }) + }) +} + +pub fn get_embedding_presets() -> &'static EmbeddingPresets { + EMBEDDING_PRESETS.get_or_init(|| { + serde_json::from_str::(UNPARSED_EMBEDDING_PRESETS) + .unwrap_or_else(|e| { + let up_to_line = UNPARSED_EMBEDDING_PRESETS + .lines() + .take(e.line()) + .collect::>() + .join("\n"); + panic!("{}\nfailed to parse EMBEDDING_PRESETS: {}", up_to_line, e); + }) }) } fn populate_model_records(provider: &mut CapsProvider, experimental: bool) { - let known_models = get_known_models(); + let completion_presets = get_completion_presets(); + let embedding_presets = get_embedding_presets(); for model_name in &provider.running_models { if !provider.completion_models.contains_key(model_name) { - if let Some(model_rec) = 
find_model_match(model_name, &provider.completion_models, &known_models.completion_models, experimental) { - provider.completion_models.insert(model_name.clone(), model_rec); + if let Some(model_rec) = find_model_match( + model_name, + &provider.completion_models, + &completion_presets.completion_models, + experimental, + ) { + provider + .completion_models + .insert(model_name.clone(), model_rec); } } if !provider.chat_models.contains_key(model_name) { - if let Some(model_rec) = find_model_match(model_name, &provider.chat_models, &known_models.chat_models, experimental) { - provider.chat_models.insert(model_name.clone(), model_rec); - } - } - } - - for model in &provider.running_models { - if !provider.completion_models.contains_key(model) && - !provider.chat_models.contains_key(model) && - !(model == &provider.embedding_model.base.name) { - tracing::warn!("Indicated as running, unknown model {:?} for provider {}, maybe update this rust binary", model, provider.name); + let placeholder = ChatModelRecord { + base: BaseModelRecord { + enabled: true, + ..Default::default() + }, + ..Default::default() + }; + provider.chat_models.insert(model_name.clone(), placeholder); } } if !provider.embedding_model.is_configured() && !provider.embedding_model.base.name.is_empty() { let model_name = provider.embedding_model.base.name.clone(); - if let Some(model_rec) = find_model_match(&model_name, &IndexMap::new(), &known_models.embedding_models, experimental) { + if let Some(model_rec) = find_model_match( + &model_name, + &IndexMap::new(), + &embedding_presets.embedding_models, + experimental, + ) { provider.embedding_model = model_rec; provider.embedding_model.base.name = model_name; } else { - tracing::warn!("Unknown embedding model '{}', maybe configure it or update this binary", model_name); + tracing::warn!( + "Unknown embedding model '{}', maybe configure it or update this binary", + model_name + ); + } + } + + if provider.embedding_model.is_configured() { + let model_name = provider.embedding_model.base.name.clone(); + if let Some(preset) = find_model_match( + &model_name, + &IndexMap::new(), + &embedding_presets.embedding_models, + experimental, + ) { + if provider.embedding_model.base.tokenizer.is_empty() { + provider.embedding_model.base.tokenizer = preset.base.tokenizer.clone(); + } + if !provider.embedding_model.base.user_configured { + if provider.embedding_model.base.n_ctx == 0 { + provider.embedding_model.base.n_ctx = preset.base.n_ctx; + } + if provider.embedding_model.embedding_size == 0 { + provider.embedding_model.embedding_size = preset.embedding_size; + } + if provider.embedding_model.rejection_threshold == 0.0 { + provider.embedding_model.rejection_threshold = preset.rejection_threshold; + } + if provider.embedding_model.embedding_batch == 0 { + provider.embedding_model.embedding_batch = preset.embedding_batch; + } + } + } + if provider.embedding_model.base.tokenizer.is_empty() { + tracing::warn!( + "Embedding model '{}' has no tokenizer configured and no preset match; VecDB may fail to start", + provider.embedding_model.base.name + ); } } } @@ -561,32 +754,41 @@ fn find_model_match( ) -> Option { let model_stripped = strip_model_from_finetune(model_name); - if let Some(model) = provider_models.get(model_name) - .or_else(|| provider_models.get(&model_stripped)) { + if let Some(model) = provider_models + .get(model_name) + .or_else(|| provider_models.get(&model_stripped)) + { if !model.base().experimental || experimental { return Some(model.clone()); } } for model in 
provider_models.values() { - if model.base().similar_models.contains(model_name) || - model.base().similar_models.contains(&model_stripped) { + if model.base().similar_models.contains(model_name) + || model.base().similar_models.contains(&model_stripped) + { if !model.base().experimental || experimental { return Some(model.clone()); } } } - if let Some(model) = known_models.get(model_name) - .or_else(|| known_models.get(&model_stripped)) { + if let Some(model) = known_models + .get(model_name) + .or_else(|| known_models.get(&model_stripped)) + { if !model.base().experimental || experimental { return Some(model.clone()); } } for model in known_models.values() { - if model.base().similar_models.contains(&model_name.to_string()) || - model.base().similar_models.contains(&model_stripped) { + if model + .base() + .similar_models + .contains(&model_name.to_string()) + || model.base().similar_models.contains(&model_stripped) + { if !model.base().experimental || experimental { return Some(model.clone()); } @@ -596,21 +798,27 @@ fn find_model_match( None } -pub fn resolve_api_key(provider: &CapsProvider, key: &str, fallback: &str, key_name: &str) -> String { +pub fn resolve_api_key( + provider: &CapsProvider, + key: &str, + fallback: &str, + key_name: &str, +) -> String { match key { k if k.is_empty() => fallback.to_string(), - k if k.starts_with("$") => { - match std::env::var(&k[1..]) { - Ok(env_val) => env_val, - Err(e) => { - tracing::error!( - "tried to read {} from env var {} for provider {}, but failed: {}", - key_name, k, provider.name, e - ); - fallback.to_string() - } + k if k.starts_with("$") => match std::env::var(&k[1..]) { + Ok(env_val) => env_val, + Err(e) => { + tracing::error!( + "tried to read {} from env var {} for provider {}, but failed: {}", + key_name, + k, + provider.name, + e + ); + fallback.to_string() } - } + }, k => k.to_string(), } } @@ -619,27 +827,31 @@ pub fn resolve_provider_api_key(provider: &CapsProvider, cmdline_api_key: &str) resolve_api_key(provider, &provider.api_key, &cmdline_api_key, "API key") } -pub fn resolve_tokenizer_api_key(provider: &CapsProvider) -> String { - resolve_api_key(provider, &provider.tokenizer_api_key, "", "tokenizer API key") -} - pub async fn get_provider_from_template_and_config_file( - config_dir: &Path, name: &str, config_file_must_exist: bool, post_process: bool, experimental: bool + config_dir: &Path, + name: &str, + config_file_must_exist: bool, + post_process: bool, + experimental: bool, ) -> Result { - let mut provider = get_provider_templates().get(name).cloned() + let mut provider = get_provider_templates() + .get(name) + .cloned() .ok_or("Provider template not found")?; let provider_path = config_dir.join("providers.d").join(format!("{name}.yaml")); let config_file_value = match tokio::fs::read_to_string(&provider_path).await { - Ok(content) => { - serde_yaml::from_str::(&content) - .map_err_with_prefix(format!("Error parsing file {}:", provider_path.display()))? 
- }, + Ok(content) => serde_yaml::from_str::(&content) + .map_err_with_prefix(format!("Error parsing file {}:", provider_path.display()))?, Err(e) if e.kind() == std::io::ErrorKind::NotFound && !config_file_must_exist => { serde_yaml::Value::Mapping(serde_yaml::Mapping::new()) - }, + } Err(e) => { - return Err(format!("Failed to read file {}: {}", provider_path.display(), e)); + return Err(format!( + "Failed to read file {}: {}", + provider_path.display(), + e + )); } }; @@ -652,29 +864,6 @@ pub async fn get_provider_from_template_and_config_file( Ok(provider) } -pub async fn get_provider_from_server(gcx: Arc>) -> Result { - let command_line = CommandLine::from_args(); - let cmdline_api_key = command_line.api_key.clone(); - let cmdline_experimental = command_line.experimental; - let (caps_value, caps_url) = load_caps_value_from_url(command_line, gcx.clone()).await?; - - if let Ok(self_hosted_caps) = serde_json::from_value::(caps_value.clone()) { - let mut provider = self_hosted_caps.into_provider(&caps_url, &cmdline_api_key)?; - post_process_provider(&mut provider, true, cmdline_experimental); - provider.api_key = resolve_provider_api_key(&provider, &cmdline_api_key); - provider.tokenizer_api_key = resolve_tokenizer_api_key(&provider); - Ok(provider) - } else { - let mut provider = serde_json::from_value::(caps_value).map_err_to_string()?; - - resolve_relative_urls(&mut provider, &caps_url)?; - post_process_provider(&mut provider, true, cmdline_experimental); - provider.api_key = resolve_provider_api_key(&provider, &cmdline_api_key); - provider.tokenizer_api_key = resolve_tokenizer_api_key(&provider); - Ok(provider) - } -} - #[cfg(test)] mod tests { use super::*; @@ -685,7 +874,79 @@ mod tests { } #[test] - fn test_parse_known_models() { - let _ = get_known_models(); // This will panic if any model fails to parse + fn test_parse_completion_presets() { + let _ = get_completion_presets(); // This will panic if any preset fails to parse + } + + #[test] + fn test_parse_embedding_presets() { + let _ = get_embedding_presets(); // This will panic if any preset fails to parse + } + + #[test] + fn test_embedding_tokenizer_prefill_from_preset() { + let mut provider = CapsProvider { + name: "test".to_string(), + embedding_model: EmbeddingModelRecord { + base: BaseModelRecord { + name: "text-embedding-3-small".to_string(), + n_ctx: 8191, + tokenizer: String::new(), + enabled: true, + ..Default::default() + }, + embedding_size: 1536, + ..Default::default() + }, + ..Default::default() + }; + populate_model_records(&mut provider, false); + assert!( + !provider.embedding_model.base.tokenizer.is_empty(), + "tokenizer should have been filled from embedding presets" + ); + assert_eq!( + provider.embedding_model.base.tokenizer, + "hf://Xenova/text-embedding-ada-002" + ); + } + + #[test] + fn test_embedding_prefill_respects_user_configured() { + let mut provider = CapsProvider { + name: "test".to_string(), + embedding_model: EmbeddingModelRecord { + base: BaseModelRecord { + name: "text-embedding-3-small".to_string(), + n_ctx: 4096, + tokenizer: String::new(), + enabled: true, + user_configured: true, + ..Default::default() + }, + embedding_size: 0, + rejection_threshold: 0.0, + embedding_batch: 0, + }, + ..Default::default() + }; + populate_model_records(&mut provider, false); + assert_eq!( + provider.embedding_model.base.tokenizer, + "hf://Xenova/text-embedding-ada-002", + "tokenizer should always be filled even for user-configured models" + ); + assert_eq!( + provider.embedding_model.base.n_ctx, 4096, + 
"user-configured n_ctx should NOT be overwritten" + ); + assert_eq!( + provider.embedding_model.embedding_size, 0, + "user-configured zero embedding_size should NOT be overwritten" + ); + assert_eq!( + provider.embedding_model.rejection_threshold, 0.0, + "user-configured zero rejection_threshold should NOT be overwritten" + ); } } diff --git a/refact-agent/engine/src/caps/self_hosted.rs b/refact-agent/engine/src/caps/self_hosted.rs deleted file mode 100644 index 7d3355a1b..000000000 --- a/refact-agent/engine/src/caps/self_hosted.rs +++ /dev/null @@ -1,402 +0,0 @@ -use std::collections::HashMap; -use std::sync::Arc; - -use indexmap::IndexMap; -use serde::Deserialize; - -use crate::caps::{ - BaseModelRecord, ChatModelRecord, CodeAssistantCaps, CompletionModelRecord, DefaultModels, - EmbeddingModelRecord, CapsMetadata, default_chat_scratchpad, default_completion_scratchpad, - default_completion_scratchpad_patch, default_embedding_batch, default_hf_tokenizer_template, - default_rejection_threshold, relative_to_full_url, normalize_string, resolve_relative_urls -}; -use crate::caps::providers; - -#[derive(Debug, Deserialize, Clone, Default)] -pub struct SelfHostedCapsModelRecord { - pub n_ctx: usize, - - #[serde(default)] - pub supports_scratchpads: HashMap, - - #[serde(default)] - pub supports_tools: bool, - - #[serde(default)] - pub supports_multimodality: bool, - - #[serde(default)] - pub supports_clicks: bool, - - #[serde(default)] - pub supports_agent: bool, - - #[serde(default)] - pub supports_reasoning: Option, - - #[serde(default)] - pub supports_boost_reasoning: bool, - - #[serde(default)] - pub default_temperature: Option, -} - -#[derive(Debug, Deserialize, Clone, Default)] -pub struct SelfHostedCapsEmbeddingModelRecord { - pub n_ctx: usize, - pub size: i32, -} - -#[derive(Debug, Deserialize, Clone, Default)] -pub struct SelfHostedCapsCompletion { - pub endpoint: String, - pub models: IndexMap, - pub default_model: String, -} - -#[derive(Debug, Deserialize, Clone, Default)] -pub struct SelfHostedCapsChat { - pub endpoint: String, - pub models: IndexMap, - pub default_model: String, - #[serde(default)] - pub default_light_model: String, - #[serde(default)] - pub default_thinking_model: String, -} - -#[derive(Debug, Deserialize, Clone, Default)] -pub struct SelfHostedCapsEmbedding { - pub endpoint: String, - pub models: IndexMap, - pub default_model: String, -} - -#[derive(Debug, Deserialize, Clone, Default)] -pub struct SelfHostedCapsTelemetryEndpoints { - pub telemetry_basic_endpoint: String, - pub telemetry_basic_retrieve_my_own_endpoint: String, -} - -#[derive(Debug, Deserialize, Clone, Default)] -pub struct SelfHostedCaps { - #[serde(deserialize_with = "normalize_string")] - pub cloud_name: String, - - pub completion: SelfHostedCapsCompletion, - pub chat: SelfHostedCapsChat, - pub embedding: SelfHostedCapsEmbedding, - - pub telemetry_endpoints: SelfHostedCapsTelemetryEndpoints, - pub tokenizer_endpoints: HashMap, - - #[serde(default)] - pub customization: String, - #[serde(default)] - pub caps_version: i64, - #[serde(default)] - pub support_metadata: bool, - #[serde(default)] - pub metadata: CapsMetadata, -} - -fn configure_base_model( - base_model: &mut BaseModelRecord, - model_name: &str, - endpoint: &str, - cloud_name: &str, - tokenizer_endpoints: &HashMap, - caps_url: &String, - cmdline_api_key: &str, - support_metadata: &bool, -) -> Result<(), String> { - base_model.name = model_name.to_string(); - base_model.id = format!("{}/{}", cloud_name, model_name); - if 
base_model.endpoint.is_empty() { - base_model.endpoint = relative_to_full_url(caps_url, &endpoint.replace("$MODEL", model_name))?; - } - if let Some(tokenizer) = tokenizer_endpoints.get(&base_model.name) { - base_model.tokenizer = relative_to_full_url(caps_url, &tokenizer)?; - } - base_model.support_metadata = support_metadata.clone(); - base_model.api_key = cmdline_api_key.to_string(); - base_model.endpoint_style = "openai".to_string(); - Ok(()) -} - -impl SelfHostedCapsModelRecord { - fn get_completion_scratchpad(&self) -> (String, serde_json::Value) { - if !self.supports_scratchpads.is_empty() { - let scratchpad_name = self.supports_scratchpads.keys().next().unwrap_or(&default_completion_scratchpad()).clone(); - let scratchpad_patch = self.supports_scratchpads.values().next().unwrap_or(&serde_json::Value::Null).clone(); - (scratchpad_name, scratchpad_patch) - } else { - (default_completion_scratchpad(), default_completion_scratchpad_patch()) - } - } - - fn get_chat_scratchpad(&self) -> (String, serde_json::Value) { - if !self.supports_scratchpads.is_empty() { - let scratchpad_name = self.supports_scratchpads.keys().next().unwrap_or(&default_chat_scratchpad()).clone(); - let scratchpad_patch = self.supports_scratchpads.values().next().unwrap_or(&serde_json::Value::Null).clone(); - (scratchpad_name, scratchpad_patch) - } else { - (default_chat_scratchpad(), serde_json::Value::Null) - } - } - - pub fn into_completion_model( - &self, - model_name: &str, - self_hosted_caps: &SelfHostedCaps, - caps_url: &String, - cmdline_api_key: &str, - ) -> Result { - let mut base = BaseModelRecord { - n_ctx: self.n_ctx, - enabled: true, - ..Default::default() - }; - - configure_base_model( - &mut base, - model_name, - &self_hosted_caps.completion.endpoint, - &self_hosted_caps.cloud_name, - &self_hosted_caps.tokenizer_endpoints, - caps_url, - cmdline_api_key, - &self_hosted_caps.support_metadata, - )?; - - let (scratchpad, scratchpad_patch) = self.get_completion_scratchpad(); - - Ok(CompletionModelRecord { - base, - scratchpad, - scratchpad_patch, - model_family: None, - }) - } -} - -impl SelfHostedCapsModelRecord { - pub fn into_chat_model( - &self, - model_name: &str, - self_hosted_caps: &SelfHostedCaps, - caps_url: &String, - cmdline_api_key: &str, - ) -> Result { - let mut base = BaseModelRecord { - n_ctx: self.n_ctx, - enabled: true, - ..Default::default() - }; - - let (scratchpad, scratchpad_patch) = self.get_chat_scratchpad(); - - // Non passthrough models, don't support endpoints of `/v1/chat/completions` in openai style, only `/v1/completions` - let endpoint_to_use = if scratchpad == "PASSTHROUGH" { - &self_hosted_caps.chat.endpoint - } else { - &self_hosted_caps.completion.endpoint - }; - - configure_base_model( - &mut base, - model_name, - endpoint_to_use, - &self_hosted_caps.cloud_name, - &self_hosted_caps.tokenizer_endpoints, - caps_url, - cmdline_api_key, - &self_hosted_caps.support_metadata, - )?; - - Ok(ChatModelRecord { - base, - scratchpad, - scratchpad_patch, - supports_tools: self.supports_tools, - supports_multimodality: self.supports_multimodality, - supports_clicks: self.supports_clicks, - supports_agent: self.supports_agent, - supports_reasoning: self.supports_reasoning.clone(), - supports_boost_reasoning: self.supports_boost_reasoning, - default_temperature: self.default_temperature, - }) - } -} - -impl SelfHostedCapsEmbeddingModelRecord { - pub fn into_embedding_model( - &self, - model_name: &str, - self_hosted_caps: &SelfHostedCaps, - caps_url: &String, - cmdline_api_key: &str, - 
) -> Result { - let mut embedding_model = EmbeddingModelRecord { - base: BaseModelRecord { n_ctx: self.n_ctx, enabled: true, ..Default::default() }, - embedding_size: self.size, - rejection_threshold: default_rejection_threshold(), - embedding_batch: default_embedding_batch(), - }; - - configure_base_model( - &mut embedding_model.base, - model_name, - &self_hosted_caps.embedding.endpoint, - &self_hosted_caps.cloud_name, - &self_hosted_caps.tokenizer_endpoints, - caps_url, - cmdline_api_key, - &self_hosted_caps.support_metadata, - )?; - - Ok(embedding_model) - } -} - - -impl SelfHostedCaps { - pub fn into_caps(self, caps_url: &String, cmdline_api_key: &str) -> Result { - let mut caps = CodeAssistantCaps { - cloud_name: self.cloud_name.clone(), - - telemetry_basic_dest: relative_to_full_url(caps_url, &self.telemetry_endpoints.telemetry_basic_endpoint)?, - telemetry_basic_retrieve_my_own: relative_to_full_url(caps_url, &self.telemetry_endpoints.telemetry_basic_retrieve_my_own_endpoint)?, - - completion_models: IndexMap::new(), - chat_models: IndexMap::new(), - embedding_model: EmbeddingModelRecord::default(), - - defaults: DefaultModels { - completion_default_model: format!("{}/{}", self.cloud_name, self.completion.default_model), - chat_default_model: format!("{}/{}", self.cloud_name, self.chat.default_model), - chat_thinking_model: if self.chat.default_thinking_model.is_empty() { - String::new() - } else { - format!("{}/{}", self.cloud_name, self.chat.default_thinking_model) - }, - chat_light_model: if self.chat.default_light_model.is_empty() { - String::new() - } else { - format!("{}/{}", self.cloud_name, self.chat.default_light_model) - }, - }, - customization: self.customization.clone(), - caps_version: self.caps_version, - - hf_tokenizer_template: default_hf_tokenizer_template(), - - metadata: self.metadata.clone(), - }; - - for (model_name, model_rec) in &self.completion.models { - let completion_model = model_rec.into_completion_model( - model_name, - &self, - caps_url, - cmdline_api_key, - )?; - - caps.completion_models.insert(completion_model.base.id.clone(), Arc::new(completion_model)); - } - - for (model_name, model_rec) in &self.chat.models { - let chat_model = model_rec.into_chat_model( - model_name, - &self, - caps_url, - cmdline_api_key, - )?; - - caps.chat_models.insert(chat_model.base.id.clone(), Arc::new(chat_model)); - } - - if let Some((model_name, model_rec)) = self.embedding.models.get_key_value(&self.embedding.default_model) { - let embedding_model = model_rec.into_embedding_model( - model_name, - &self, - caps_url, - cmdline_api_key, - )?; - caps.embedding_model = embedding_model; - } - - Ok(caps) - } - - pub fn into_provider(self, caps_url: &String, cmdline_api_key: &str) -> Result { - let mut provider = providers::CapsProvider { - name: self.cloud_name.clone(), - enabled: true, - supports_completion: true, - endpoint_style: "openai".to_string(), - completion_endpoint: self.completion.endpoint.clone(), - chat_endpoint: self.chat.endpoint.clone(), - embedding_endpoint: self.embedding.endpoint.clone(), - api_key: cmdline_api_key.to_string(), - tokenizer_api_key: cmdline_api_key.to_string(), - code_completion_n_ctx: 0, - support_metadata: self.support_metadata, - completion_models: IndexMap::new(), - chat_models: IndexMap::new(), - embedding_model: EmbeddingModelRecord::default(), - models_dict_patch: IndexMap::new(), - defaults: DefaultModels { - completion_default_model: self.completion.default_model.clone(), - chat_default_model: self.chat.default_model.clone(), - 
chat_thinking_model: if self.chat.default_thinking_model.is_empty() { - String::new() - } else { - format!("{}/{}", self.cloud_name, self.chat.default_thinking_model) - }, - chat_light_model: if self.chat.default_light_model.is_empty() { - String::new() - } else { - format!("{}/{}", self.cloud_name, self.chat.default_light_model) - }, - }, - running_models: Vec::new(), - }; - - for (model_name, model_rec) in &self.completion.models { - let completion_model = model_rec.into_completion_model( - model_name, - &self, - caps_url, - cmdline_api_key, - )?; - - provider.completion_models.insert(model_name.clone(), completion_model); - } - - for (model_name, model_rec) in &self.chat.models { - let chat_model = model_rec.into_chat_model( - model_name, - &self, - caps_url, - cmdline_api_key, - )?; - - provider.chat_models.insert(model_name.clone(), chat_model); - } - - if let Some((model_name, model_rec)) = self.embedding.models.get_key_value(&self.embedding.default_model) { - let embedding_model = model_rec.into_embedding_model( - model_name, - &self, - caps_url, - cmdline_api_key, - )?; - provider.embedding_model = embedding_model; - } - - resolve_relative_urls(&mut provider, caps_url)?; - - Ok(provider) - } -} diff --git a/refact-agent/engine/src/chat/cache_guard.rs b/refact-agent/engine/src/chat/cache_guard.rs new file mode 100644 index 000000000..6773701a9 --- /dev/null +++ b/refact-agent/engine/src/chat/cache_guard.rs @@ -0,0 +1,363 @@ +use std::sync::Arc; + +use serde_json::{Map, Value}; +use similar::{Algorithm, TextDiff}; +use tokio::sync::{Mutex as AMutex, RwLock as ARwLock}; + +use crate::global_context::GlobalContext; +use crate::providers::traits::ModelPricing; +use crate::tokens::{cached_tokenizer, count_text_tokens_with_fallback}; + +const CACHE_GUARD_TOOL_NAME: &str = "cache_guard"; +const CACHE_GUARD_TOOL_CALL_ID: &str = "cacheguard_force"; +const CACHE_GUARD_RULE: &str = "Prompt cache prefix violation"; + +const IGNORED_KEYS: &[&str] = &[ + "cache_control", + "request_attempt_id", + "temperature", + "max_tokens", + "max_completion_tokens", + "max_output_tokens", + "frequency_penalty", + "stop", + "stop_sequences", + "n", + "usage", + "finish_reason", + "checkpoints", + "provider_specific_fields", +]; + +pub fn is_cache_guard_pause_id(tool_call_id: &str) -> bool { + tool_call_id.starts_with("cacheguard_") +} + +pub fn is_cache_guard_pause_reason(reason: &crate::chat::types::PauseReason) -> bool { + reason.tool_name == CACHE_GUARD_TOOL_NAME || is_cache_guard_pause_id(&reason.tool_call_id) +} + +pub async fn is_guard_enabled_for_model( + gcx: Arc>, + model_id: &str, +) -> bool { + let Some(pricing) = get_model_pricing(&gcx, model_id).await else { + return false; + }; + pricing.cache_read.is_some() || pricing.cache_creation.is_some() +} + +pub fn sanitize_body_for_cache_guard(value: &Value) -> Value { + sanitize_value(value) +} + +pub fn is_append_only_prefix(prev: &Value, next: &Value) -> bool { + is_append_only_prefix_inner(prev, next, true, None) +} + +fn is_append_only_prefix_inner( + prev: &Value, + next: &Value, + strict_object: bool, + parent_key: Option<&str>, +) -> bool { + match (prev, next) { + (Value::Null, Value::Null) + | (Value::Bool(_), Value::Bool(_)) + | (Value::Number(_), Value::Number(_)) + | (Value::String(_), Value::String(_)) => prev == next, + (Value::Array(a), Value::Array(b)) => { + if a.len() > b.len() { + return false; + } + let is_messages_array = parent_key == Some("messages"); + a.iter().zip(b.iter()).all(|(old_item, new_item)| { + 
is_append_only_prefix_inner(old_item, new_item, is_messages_array, None) + }) + } + (Value::Object(a), Value::Object(b)) => { + if strict_object { + if a.len() != b.len() { + return false; + } + if !a.keys().all(|k| b.contains_key(k)) { + return false; + } + } + a.iter().all(|(k, old_v)| { + b.get(k) + .map(|new_v| is_append_only_prefix_inner(old_v, new_v, false, Some(k))) + .unwrap_or(false) + }) + } + _ => false, + } +} + +pub fn unified_json_diff(prev: &Value, next: &Value) -> String { + let prev_pretty = serde_json::to_string_pretty(prev).unwrap_or_else(|_| prev.to_string()); + let next_pretty = serde_json::to_string_pretty(next).unwrap_or_else(|_| next.to_string()); + + let diff = TextDiff::configure() + .algorithm(Algorithm::Myers) + .diff_lines(&prev_pretty, &next_pretty); + + diff.unified_diff() + .context_radius(6) + .header("previous", "current") + .to_string() +} + +pub async fn estimate_extra_cache_miss_usd( + gcx: Arc>, + model_id: &str, + previous_sanitized: &Value, +) -> Option { + let pricing = get_model_pricing(&gcx, model_id).await?; + let cache_read_rate = pricing.cache_read?; + if pricing.prompt <= cache_read_rate { + return Some(0.0); + } + + let previous_pretty = serde_json::to_string_pretty(previous_sanitized).ok()?; + let model_rec = { + let caps = crate::global_context::try_load_caps_quickly_if_not_present(gcx.clone(), 0) + .await + .ok()?; + crate::caps::resolve_chat_model(caps, model_id).ok()? + }; + let tokenizer = cached_tokenizer(gcx, &model_rec.base).await.ok().flatten(); + let cached_tokens = count_text_tokens_with_fallback(tokenizer, &previous_pretty); + let delta_rate = pricing.prompt - cache_read_rate; + Some((cached_tokens as f64) * delta_rate / 1_000_000.0) +} + +pub async fn check_or_pause_cache_guard( + gcx: Arc>, + session_arc: Arc>, + model_id: &str, + request_body: &Value, +) -> Result, String> { + if !is_guard_enabled_for_model(gcx.clone(), model_id).await { + return Ok(None); + } + + // OpenAI Responses API stateful mode: when previous_response_id is present, + // the server handles caching via response chaining. The request body intentionally + // sends only tail items (not the full conversation), so the append-only prefix + // check does not apply. 
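The cache-miss estimate in `estimate_extra_cache_miss_usd` above reduces to a single rate difference. A minimal, self-contained sketch with hypothetical pricing (USD per million tokens, not real provider rates):

```rust
// Hypothetical numbers, illustration only: tokens that would have been read
// from cache get re-billed at the full prompt rate on a miss.
fn extra_cache_miss_usd(cached_tokens: usize, prompt_per_mtok: f64, cache_read_per_mtok: f64) -> f64 {
    let delta_rate = (prompt_per_mtok - cache_read_per_mtok).max(0.0);
    cached_tokens as f64 * delta_rate / 1_000_000.0
}

fn main() {
    // 40_000 cached prompt tokens, $3.00/M prompt, $0.30/M cache read
    // => 40_000 * 2.70 / 1_000_000 = $0.108 of extra spend on a full miss.
    println!("${:.3}", extra_cache_miss_usd(40_000, 3.00, 0.30));
}
```

In the real function the token count comes from the tokenizer run over the previously cached (sanitized) request body, and the two rates come from the provider's `ModelPricing`.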
+ if request_body.get("previous_response_id").is_some_and(|v| !v.is_null()) { + return Ok(None); + } + + let sanitized = sanitize_body_for_cache_guard(request_body); + + let maybe_violation_prev = { + let mut session = session_arc.lock().await; + if session.cache_guard_force_next { + session.cache_guard_force_next = false; + None + } else if let Some(prev) = session.cache_guard_snapshot.as_ref() { + if is_append_only_prefix(prev, &sanitized) { + None + } else { + Some(prev.clone()) + } + } else { + None + } + }; + + let Some(previous) = maybe_violation_prev else { + return Ok(Some(sanitized)); + }; + + let diff = unified_json_diff(&previous, &sanitized); + let estimated_extra_usd = estimate_extra_cache_miss_usd(gcx.clone(), model_id, &previous).await; + + { + let mut session = session_arc.lock().await; + session.discard_draft_for_pause(); + session + .abort_flag + .store(true, std::sync::atomic::Ordering::SeqCst); + + let mut summary = format!( + "Prompt cache append-only prefix check failed for model `{}`.\n\n", + model_id + ); + if let Some(extra) = estimated_extra_usd { + summary.push_str(&format!( + "Estimated extra cost if cache miss occurs: `${:.6}` USD.\n\n", + extra + )); + } + summary.push_str("Unified diff (sanitized provider request body):\n\n"); + summary.push_str("```diff\n"); + summary.push_str(&diff); + summary.push_str("\n```\n"); + + let reasons = vec![crate::chat::types::PauseReason { + reason_type: "confirmation".to_string(), + tool_name: CACHE_GUARD_TOOL_NAME.to_string(), + command: summary, + rule: CACHE_GUARD_RULE.to_string(), + tool_call_id: CACHE_GUARD_TOOL_CALL_ID.to_string(), + integr_config_path: None, + }]; + session.set_paused_with_reasons_and_auto_approved(reasons, Vec::new(), None); + } + + Err("Cache guard: request blocked due to prompt prefix violation".to_string()) +} + +pub async fn commit_cache_guard_snapshot( + session_arc: Arc>, + sanitized_body: Value, +) { + let mut session = session_arc.lock().await; + session.cache_guard_snapshot = Some(sanitized_body); +} + +async fn get_model_pricing( + gcx: &Arc>, + model_id: &str, +) -> Option { + let parts: Vec<&str> = model_id.splitn(2, '/').collect(); + if parts.len() != 2 { + return None; + } + let provider_name = parts[0]; + let model_name = parts[1]; + + let gcx_locked = gcx.read().await; + let registry = gcx_locked.providers.read().await; + registry + .get(provider_name) + .and_then(|provider| provider.model_pricing(model_name)) +} + +fn sanitize_value(value: &Value) -> Value { + match value { + Value::Object(map) => { + let mut out = Map::new(); + for (k, v) in map { + if IGNORED_KEYS.contains(&k.as_str()) { + continue; + } + out.insert(k.clone(), sanitize_value(v)); + } + Value::Object(out) + } + Value::Array(arr) => Value::Array(arr.iter().map(sanitize_value).collect()), + _ => value.clone(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_sanitize_removes_ignored_fields_recursively() { + let input = json!({ + "messages": [ + {"role": "user", "content": [{"type": "text", "text": "hello", "cache_control": {"type": "ephemeral"}}]}, + {"role": "assistant", "content": "ok", "provider_specific_fields": {"x": 1}} + ], + "temperature": 0.3, + "max_tokens": 1024, + "meta": {"request_attempt_id": "abc", "chat_id": "x"}, + "reasoning_effort": "high" + }); + let out = sanitize_body_for_cache_guard(&input); + assert!(out.get("temperature").is_none()); + assert!(out.get("max_tokens").is_none()); + assert_eq!(out["reasoning_effort"], "high"); + 
assert!(out["meta"].get("request_attempt_id").is_none()); + assert_eq!(out["meta"]["chat_id"], "x"); + + let first_content = out["messages"][0]["content"].as_array().unwrap(); + assert!(first_content[0].get("cache_control").is_none()); + assert!(out["messages"][1].get("provider_specific_fields").is_none()); + } + + #[test] + fn test_append_only_prefix_objects_and_arrays() { + let prev = json!({"messages": [1, 2], "meta": {"a": 1}}); + let next = json!({"messages": [1, 2, 3], "meta": {"a": 1, "b": 2}}); + assert!(is_append_only_prefix(&prev, &next)); + + let bad = json!({"messages": [1, 99, 3], "meta": {"a": 1}}); + assert!(!is_append_only_prefix(&prev, &bad)); + } + + #[test] + fn test_append_only_prefix_strict_top_level_keys() { + let prev = json!({"messages": [1, 2], "meta": {"a": 1}}); + let next = json!({"messages": [1, 2, 3], "meta": {"a": 1}, "extra": true}); + assert!(!is_append_only_prefix(&prev, &next)); + } + + #[test] + fn test_append_only_prefix_messages_keys_strict() { + let prev = json!({ + "messages": [ + {"role": "user", "content": "hi"} + ] + }); + let next = json!({ + "messages": [ + {"role": "user", "content": "hi", "extra": true} + ] + }); + assert!(!is_append_only_prefix(&prev, &next)); + } + + #[test] + fn test_cache_guard_pause_reason_detection() { + let reason = crate::chat::types::PauseReason { + reason_type: "confirmation".to_string(), + tool_name: "cache_guard".to_string(), + command: String::new(), + rule: String::new(), + tool_call_id: "cacheguard_force".to_string(), + integr_config_path: None, + }; + assert!(is_cache_guard_pause_reason(&reason)); + assert!(is_cache_guard_pause_id("cacheguard_force_once")); + assert!(!is_cache_guard_pause_id("call_123")); + } + + #[test] + fn test_append_only_prefix_ignores_previous_response_id() { + // When previous_response_id is present, the request body uses tail-only mode + // (only new messages after last assistant), so the full-body comparison is invalid. + // The cache guard should skip validation in this case. + let prev = json!({ + "model": "gpt-4.1", + "instructions": "You are helpful", + "input": [ + {"type": "message", "role": "user", "content": [{"type": "input_text", "text": "hello"}]} + ], + "store": true + }); + let next_stateful = json!({ + "model": "gpt-4.1", + "instructions": "You are helpful", + "input": [ + {"type": "function_call_output", "call_id": "call_1", "output": "result"} + ], + "store": true, + "previous_response_id": "resp_abc123" + }); + // This SHOULD fail the append-only check (different input, extra key) + assert!(!is_append_only_prefix( + &sanitize_body_for_cache_guard(&prev), + &sanitize_body_for_cache_guard(&next_stateful), + )); + // The check_or_pause_cache_guard function would skip entirely when + // previous_response_id is present, avoiding this false violation. 
+ } +} diff --git a/refact-agent/engine/src/chat/config.rs b/refact-agent/engine/src/chat/config.rs new file mode 100644 index 000000000..b93c614ff --- /dev/null +++ b/refact-agent/engine/src/chat/config.rs @@ -0,0 +1,111 @@ +use std::time::Duration; + +#[derive(Debug, Clone)] +pub struct ChatLimits { + pub max_queue_size: usize, + pub event_channel_capacity: usize, + pub recent_request_ids_capacity: usize, + pub max_images_per_message: usize, + pub max_parallel_tools: usize, + pub max_included_files: usize, + pub max_file_size: usize, +} + +impl Default for ChatLimits { + fn default() -> Self { + Self { + max_queue_size: 100, + event_channel_capacity: 4096, + recent_request_ids_capacity: 100, + max_images_per_message: 5, + max_parallel_tools: 16, + max_included_files: 15, + max_file_size: 40_000, + } + } +} + +#[derive(Debug, Clone)] +pub struct ChatTimeouts { + pub session_idle: Duration, + pub session_cleanup_interval: Duration, + pub stream_idle: Duration, + pub stream_total: Duration, + pub stream_heartbeat: Duration, + pub watcher_debounce: Duration, + pub watcher_idle: Duration, + pub watcher_poll: Duration, +} + +impl Default for ChatTimeouts { + fn default() -> Self { + Self { + session_idle: Duration::from_secs(30 * 60), + session_cleanup_interval: Duration::from_secs(5 * 60), + stream_idle: Duration::from_secs(60 * 60), + stream_total: Duration::from_secs(60 * 60), + stream_heartbeat: Duration::from_secs(2), + watcher_debounce: Duration::from_millis(200), + watcher_idle: Duration::from_secs(60), + watcher_poll: Duration::from_millis(50), + } + } +} + +#[derive(Debug, Clone)] +pub struct TokenDefaults { + pub min_budget_tokens: usize, + pub default_n_ctx: usize, +} + +impl Default for TokenDefaults { + fn default() -> Self { + Self { + min_budget_tokens: 1024, + default_n_ctx: 32000, + } + } +} + +#[derive(Debug, Clone)] +pub struct PresentationLimits { + pub preview_chars: usize, +} + +impl Default for PresentationLimits { + fn default() -> Self { + Self { preview_chars: 120 } + } +} + +#[derive(Debug, Clone, Default)] +pub struct ChatConfig { + pub limits: ChatLimits, + pub timeouts: ChatTimeouts, + pub tokens: TokenDefaults, + pub presentation: PresentationLimits, +} + +impl ChatConfig { + pub fn new() -> Self { + Self::default() + } +} + +pub static CHAT_CONFIG: std::sync::LazyLock = std::sync::LazyLock::new(ChatConfig::new); + +pub fn limits() -> &'static ChatLimits { + &CHAT_CONFIG.limits +} + +pub fn timeouts() -> &'static ChatTimeouts { + &CHAT_CONFIG.timeouts +} + +pub fn tokens() -> &'static TokenDefaults { + &CHAT_CONFIG.tokens +} + +pub fn presentation() -> &'static PresentationLimits { + &CHAT_CONFIG.presentation +} diff --git a/refact-agent/engine/src/chat/content.rs b/refact-agent/engine/src/chat/content.rs new file mode 100644 index 000000000..93e50f655 --- /dev/null +++ b/refact-agent/engine/src/chat/content.rs @@ -0,0 +1,373 @@ +use tracing::warn; + +use crate::call_validation::ChatContent; +use crate::scratchpads::multimodality::MultimodalElement; +use crate::scratchpads::scratchpad_utils::parse_image_b64_from_image_url_openai; +use super::config::limits; + +pub fn validate_content_with_attachments( + content: &serde_json::Value, + attachments: &[serde_json::Value], +) -> Result { + let mut elements: Vec = Vec::new(); + let mut image_count = 0; + + if let Some(s) = content.as_str() { + if !s.is_empty() { + elements.push( + MultimodalElement::new("text".to_string(), s.to_string()) + .map_err(|e| format!("Invalid text content: {}", e))?, + ); + } + } else if let 
Some(arr) = content.as_array() { + for (idx, item) in arr.iter().enumerate() { + let item_type = item + .get("type") + .and_then(|t| t.as_str()) + .ok_or_else(|| format!("Content element {} missing 'type' field", idx))?; + match item_type { + "text" => { + let text = item + .get("text") + .and_then(|t| t.as_str()) + .ok_or_else(|| format!("Content element {} missing 'text' field", idx))?; + elements.push( + MultimodalElement::new("text".to_string(), text.to_string()) + .map_err(|e| format!("Invalid text content at {}: {}", idx, e))?, + ); + } + "image_url" => { + image_count += 1; + if image_count > limits().max_images_per_message { + return Err(format!( + "Too many images: max {} allowed", + limits().max_images_per_message + )); + } + let url = item + .get("image_url") + .and_then(|u| u.get("url")) + .and_then(|u| u.as_str()) + .ok_or_else(|| format!("Content element {} missing image_url.url", idx))?; + let (image_type, _, image_content) = parse_image_b64_from_image_url_openai(url) + .ok_or_else(|| format!("Invalid image URL format at element {}", idx))?; + elements.push( + MultimodalElement::new(image_type, image_content) + .map_err(|e| format!("Invalid image at {}: {}", idx, e))?, + ); + } + other => { + return Err(format!( + "Unknown content type '{}' at element {}", + other, idx + )); + } + } + } + } else if !content.is_null() { + return Err(format!("Content must be string or array, got {}", content)); + } + + for (idx, attachment) in attachments.iter().enumerate() { + let url = attachment + .get("image_url") + .and_then(|u| u.get("url")) + .and_then(|u| u.as_str()) + .ok_or_else(|| format!("Attachment {} missing image_url.url", idx))?; + image_count += 1; + if image_count > limits().max_images_per_message { + return Err(format!( + "Too many images: max {} allowed", + limits().max_images_per_message + )); + } + let (image_type, _, image_content) = parse_image_b64_from_image_url_openai(url) + .ok_or_else(|| format!("Invalid attachment image URL at {}", idx))?; + elements.push( + MultimodalElement::new(image_type, image_content) + .map_err(|e| format!("Invalid attachment image at {}: {}", idx, e))?, + ); + } + + if elements.is_empty() { + Ok(ChatContent::SimpleText(String::new())) + } else if elements.len() == 1 && elements[0].m_type == "text" { + Ok(ChatContent::SimpleText(elements.remove(0).m_content)) + } else { + Ok(ChatContent::Multimodal(elements)) + } +} + +pub fn parse_content_with_attachments( + content: &serde_json::Value, + attachments: &[serde_json::Value], +) -> ChatContent { + let base_content = parse_content_from_value(content); + + if attachments.is_empty() { + return base_content; + } + + let mut elements: Vec = match base_content { + ChatContent::SimpleText(s) if !s.is_empty() => { + vec![MultimodalElement::new("text".to_string(), s).unwrap()] + } + ChatContent::Multimodal(v) => v, + _ => Vec::new(), + }; + + for attachment in attachments { + if let Some(url) = attachment + .get("image_url") + .and_then(|u| u.get("url")) + .and_then(|u| u.as_str()) + { + if let Some((image_type, _, image_content)) = parse_image_b64_from_image_url_openai(url) + { + if let Ok(el) = MultimodalElement::new(image_type, image_content) { + elements.push(el); + } + } + } + } + + if elements.is_empty() { + ChatContent::SimpleText(String::new()) + } else if elements.len() == 1 && elements[0].m_type == "text" { + ChatContent::SimpleText(elements.remove(0).m_content) + } else { + ChatContent::Multimodal(elements) + } +} + +fn parse_content_from_value(content: &serde_json::Value) -> ChatContent { + 
if let Some(s) = content.as_str() { + return ChatContent::SimpleText(s.to_string()); + } + + if let Some(arr) = content.as_array() { + let mut elements = Vec::new(); + for item in arr { + let item_type = item.get("type").and_then(|t| t.as_str()).unwrap_or(""); + match item_type { + "text" => { + if let Some(text) = item.get("text").and_then(|t| t.as_str()) { + if let Ok(el) = MultimodalElement::new("text".to_string(), text.to_string()) + { + elements.push(el); + } + } + } + "image_url" => { + if let Some(url) = item + .get("image_url") + .and_then(|u| u.get("url")) + .and_then(|u| u.as_str()) + { + if let Some((image_type, _, image_content)) = + parse_image_b64_from_image_url_openai(url) + { + if let Ok(el) = MultimodalElement::new(image_type, image_content) { + elements.push(el); + } + } + } + } + _ => { + warn!( + "Unknown content type '{}' in message, preserving as text", + item_type + ); + if let Ok(el) = MultimodalElement::new("text".to_string(), item.to_string()) { + elements.push(el); + } + } + } + } + if !elements.is_empty() { + return ChatContent::Multimodal(elements); + } + } + + ChatContent::SimpleText(String::new()) +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_validate_content_empty_array_returns_empty() { + let content = json!([]); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_ok()); + match result.unwrap() { + ChatContent::SimpleText(s) => assert!(s.is_empty()), + _ => panic!("Expected empty SimpleText"), + } + } + + #[test] + fn test_validate_content_missing_type_error() { + let content = json!([{"text": "hello"}]); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("type")); + } + + #[test] + fn test_validate_content_text_missing_text_field_error() { + let content = json!([{"type": "text"}]); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("text")); + } + + #[test] + fn test_validate_content_image_missing_url_error() { + let content = json!([{"type": "image_url"}]); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("image_url.url")); + } + + #[test] + fn test_validate_content_unknown_type_error() { + let content = json!([{"type": "video", "data": "xyz"}]); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("Unknown content type")); + } + + #[test] + fn test_validate_content_non_string_non_array_error() { + let content = json!({"key": "value"}); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("must be string or array")); + } + + #[test] + fn test_validate_content_number_error() { + let content = json!(123); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_err()); + } + + #[test] + fn test_validate_content_simple_string_ok() { + let content = json!("Hello world"); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_ok()); + match result.unwrap() { + ChatContent::SimpleText(s) => assert_eq!(s, "Hello world"), + _ => panic!("Expected SimpleText"), + } + } + + #[test] + fn test_validate_content_text_array_ok() { + let content = json!([{"type": "text", "text": "Hello"}]); + let result = 
validate_content_with_attachments(&content, &[]); + assert!(result.is_ok()); + match result.unwrap() { + ChatContent::SimpleText(s) => assert_eq!(s, "Hello"), + _ => panic!("Expected SimpleText for single text element"), + } + } + + #[test] + fn test_validate_content_null_returns_empty() { + let content = json!(null); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_ok()); + match result.unwrap() { + ChatContent::SimpleText(s) => assert!(s.is_empty()), + _ => panic!("Expected empty SimpleText"), + } + } + + #[test] + fn test_validate_content_empty_string_returns_empty() { + let content = json!(""); + let result = validate_content_with_attachments(&content, &[]); + assert!(result.is_ok()); + match result.unwrap() { + ChatContent::SimpleText(s) => assert!(s.is_empty()), + _ => panic!("Expected empty SimpleText"), + } + } + + #[test] + fn test_parse_content_string() { + let content = json!("Simple text"); + let result = parse_content_with_attachments(&content, &[]); + match result { + ChatContent::SimpleText(s) => assert_eq!(s, "Simple text"), + _ => panic!("Expected SimpleText"), + } + } + + #[test] + fn test_parse_content_null_returns_empty() { + let content = json!(null); + let result = parse_content_with_attachments(&content, &[]); + match result { + ChatContent::SimpleText(s) => assert!(s.is_empty()), + _ => panic!("Expected empty SimpleText"), + } + } + + #[test] + fn test_parse_content_unknown_type_preserved_as_text() { + let content = json!([{"type": "custom", "data": "xyz"}]); + let result = parse_content_with_attachments(&content, &[]); + match result { + ChatContent::Multimodal(elements) => { + assert_eq!(elements.len(), 1); + assert_eq!(elements[0].m_type, "text"); + assert!(elements[0].m_content.contains("custom")); + } + _ => panic!("Expected Multimodal with preserved unknown type"), + } + } + + #[test] + fn test_parse_content_empty_array_returns_empty() { + let content = json!([]); + let result = parse_content_with_attachments(&content, &[]); + match result { + ChatContent::SimpleText(s) => assert!(s.is_empty()), + _ => panic!("Expected empty SimpleText"), + } + } + + #[test] + fn test_parse_content_text_array_single_element() { + let content = json!([{"type": "text", "text": "Hello"}]); + let result = parse_content_with_attachments(&content, &[]); + match result { + ChatContent::Multimodal(elements) => { + assert_eq!(elements.len(), 1); + assert_eq!(elements[0].m_content, "Hello"); + } + _ => panic!("Expected Multimodal"), + } + } + + #[test] + fn test_parse_content_multiple_text_elements() { + let content = json!([ + {"type": "text", "text": "Hello"}, + {"type": "text", "text": "World"} + ]); + let result = parse_content_with_attachments(&content, &[]); + match result { + ChatContent::Multimodal(elements) => { + assert_eq!(elements.len(), 2); + } + _ => panic!("Expected Multimodal"), + } + } +} diff --git a/refact-agent/engine/src/chat/generation.rs b/refact-agent/engine/src/chat/generation.rs new file mode 100644 index 000000000..16aa0f949 --- /dev/null +++ b/refact-agent/engine/src/chat/generation.rs @@ -0,0 +1,1127 @@ +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use tokio::sync::{Mutex as AMutex, RwLock as ARwLock}; +use tracing::{info, warn}; +use uuid::Uuid; + +use crate::at_commands::at_commands::AtCommandsContext; +use crate::call_validation::{ + ChatContent, ChatMessage, ChatMeta, ChatUsage, SamplingParameters, is_agentic_mode_id, +}; +use crate::chat::tool_call_recovery; +use 
crate::global_context::GlobalContext; +use crate::llm::LlmRequest; +use crate::llm::params::CacheControl; +use crate::scratchpad_abstract::HasTokenizerAndEot; +use crate::constants::CHAT_TOP_N; +use crate::http::routers::v1::knowledge_enrichment::enrich_messages_with_knowledge; + +use super::types::*; +use super::trajectories::{maybe_save_trajectory, check_external_reload_pending}; +use super::tools::{process_tool_calls_once, ToolStepOutcome}; +use super::prepare::{prepare_chat_passthrough, ChatPrepareOptions}; +use super::prompts::prepend_the_right_system_prompt_and_maybe_more_initial_messages; +use super::stream_core::{run_llm_stream, StreamRunParams, StreamCollector, normalize_tool_call, ChoiceFinal}; +use super::queue::inject_priority_messages_if_any; +use super::config::tokens; + + + +pub async fn prepare_session_preamble_and_knowledge( + gcx: Arc>, + session_arc: Arc>, +) { + let (thread, chat_id, has_system, has_project_context) = { + let session = session_arc.lock().await; + let has_sys = session.messages.first().map(|m| m.role == "system").unwrap_or(false); + let has_proj = session.messages.iter().any(|m| { + m.role == "context_file" + && m.tool_call_id == crate::chat::system_context::PROJECT_CONTEXT_MARKER + }); + (session.thread.clone(), session.chat_id.clone(), has_sys, has_proj) + }; + + let needs_preamble = !has_system || (!has_project_context && thread.include_project_info); + + if needs_preamble { + let tools: Vec = + crate::tools::tools_list::get_tools_for_mode(gcx.clone(), &thread.mode, Some(&thread.model)) + .await + .into_iter() + .map(|tool| tool.tool_description()) + .collect(); + let tool_names: std::collections::HashSet = + tools.iter().map(|t| t.name.clone()).collect(); + + let meta = ChatMeta { + chat_id: chat_id.clone(), + chat_mode: thread.mode.clone(), + chat_remote: false, + current_config_file: String::new(), + context_tokens_cap: thread.context_tokens_cap, + include_project_info: thread.include_project_info, + request_attempt_id: Uuid::new_v4().to_string(), + }; + + let messages = { + let session = session_arc.lock().await; + session.messages.clone() + }; + let mut has_rag_results = crate::scratchpads::scratchpad_utils::HasRagResults::new(); + let messages_with_preamble = + prepend_the_right_system_prompt_and_maybe_more_initial_messages( + gcx.clone(), + messages, + &meta, + &thread.task_meta, + &mut has_rag_results, + tool_names, + &thread.mode, + &thread.model, + ) + .await; + + let first_conv_idx = messages_with_preamble + .iter() + .position(|m| m.role == "user" || m.role == "assistant") + .unwrap_or(messages_with_preamble.len()); + + if first_conv_idx > 0 { + let mut session = session_arc.lock().await; + + let mut system_insert_idx = 0; + let mut context_insert_idx = session + .messages + .iter() + .position(|m| m.role == "system") + .map(|i| i + 1) + .unwrap_or(0); + + let mut inserted = 0; + for msg in messages_with_preamble.iter().take(first_conv_idx) { + if msg.role == "assistant" { + continue; + } + if msg.role == "system" + && session.messages.first().map(|m| m.role == "system").unwrap_or(false) + { + continue; + } + if msg.role == "cd_instruction" + && session.messages.iter().any(|m| m.role == "cd_instruction") + { + continue; + } + if msg.role == "context_file" + && session + .messages + .iter() + .any(|m| m.role == "context_file" && m.tool_call_id == msg.tool_call_id) + { + continue; + } + let insert_idx = if msg.role == "system" { + let idx = system_insert_idx; + system_insert_idx += 1; + context_insert_idx += 1; + idx + } else { + let idx = 
context_insert_idx; + context_insert_idx += 1; + idx + }; + session.insert_message(insert_idx, msg.clone()); + inserted += 1; + } + if inserted > 0 { + info!("Saved {} preamble messages to session", inserted); + } + } + } + + // Knowledge enrichment for agentic mode + let last_is_user = { + let session = session_arc.lock().await; + session.messages.last().map(|m| m.role == "user").unwrap_or(false) + }; + if is_agentic_mode_id(&thread.mode) && last_is_user { + let mut messages = { + let session = session_arc.lock().await; + session.messages.clone() + }; + let msg_count_before = messages.len(); + enrich_messages_with_knowledge(gcx.clone(), &mut messages, Some(&chat_id)).await; + if messages.len() > msg_count_before { + let local_last_user_idx = messages.iter().rposition(|m| m.role == "user").unwrap_or(0); + if local_last_user_idx > 0 { + let enriched_msg = &messages[local_last_user_idx - 1]; + if enriched_msg.role == "context_file" { + let mut session = session_arc.lock().await; + let session_last_user_idx = session + .messages + .iter() + .rposition(|m| m.role == "user") + .unwrap_or(0); + session.insert_message(session_last_user_idx, enriched_msg.clone()); + info!( + "Saved knowledge enrichment context_file to session at index {}", + session_last_user_idx + ); + } + } + } + } +} + +pub fn save_rag_results_to_session( + session: &mut ChatSession, + rag_results: &[serde_json::Value], +) { + let last_user_idx = session.messages.iter().rposition(|m| m.role == "user"); + if let Some(insert_idx) = last_user_idx { + let existing_content: std::collections::HashSet = session.messages.iter() + .filter(|m| m.role == "context_file" || m.role == "plain_text") + .map(|m| m.content.content_text_only()) + .collect(); + let mut offset = 0; + for rag_msg_json in rag_results { + if let Ok(msg) = serde_json::from_value::(rag_msg_json.clone()) { + if (msg.role == "context_file" || msg.role == "plain_text") + && !existing_content.contains(&msg.content.content_text_only()) + { + session.insert_message(insert_idx + offset, msg); + offset += 1; + } + } + } + } +} + +fn tail_needs_assistant(messages: &[ChatMessage]) -> bool { + let mut saw_toolish = false; + + for m in messages.iter().rev() { + match m.role.as_str() { + "assistant" => { + if !saw_toolish { + return false; + } + let Some(tcs) = m.tool_calls.as_ref() else { + return false; + }; + if tcs.is_empty() { + return false; + } + return tcs.iter().any(|tc| !tc.id.starts_with("srvtoolu_")); + } + "user" => return true, + "tool" | "context_file" => saw_toolish = true, + _ => {} + } + } + + false +} + +pub fn start_generation( + gcx: Arc>, + session_arc: Arc>, +) -> std::pin::Pin + Send>> { + Box::pin(async move { + loop { + let (thread, chat_id) = { + let session = session_arc.lock().await; + ( + session.thread.clone(), + session.chat_id.clone(), + ) + }; + + let abort_flag = { + let mut session = session_arc.lock().await; + match session.start_stream() { + Some((_message_id, abort_flag)) => abort_flag, + None => { + warn!( + "Cannot start generation for {}: already generating", + chat_id + ); + break; + } + } + }; + + let generation_result = run_llm_generation( + gcx.clone(), + session_arc.clone(), + thread, + chat_id.clone(), + abort_flag.clone(), + ) + .await; + + if let Err(e) = generation_result { + let task_meta_opt = { + let mut session = session_arc.lock().await; + if !session.abort_flag.load(Ordering::SeqCst) { + session.finish_stream_with_error(e); + } + session.thread.task_meta.clone() + }; + + if let Some(task_meta) = task_meta_opt { + let error_msg 
= { + let session = session_arc.lock().await; + session.task_agent_error.clone() + }; + if let Some(error) = error_msg { + super::task_agent_monitor::handle_agent_streaming_error( + gcx.clone(), + &task_meta, + &error, + ) + .await; + } + } + break; + } + + if abort_flag.load(Ordering::SeqCst) { + break; + } + + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + + let (mode_id, model_id) = { + let session = session_arc.lock().await; + (session.thread.mode.clone(), session.thread.model.clone()) + }; + + match process_tool_calls_once(gcx.clone(), session_arc.clone(), &mode_id, Some(&model_id)).await { + ToolStepOutcome::NoToolCalls => { + if inject_priority_messages_if_any(gcx.clone(), session_arc.clone()).await { + continue; + } + let should_continue = { + let session = session_arc.lock().await; + tail_needs_assistant(&session.messages) + }; + if should_continue { + continue; + } + break; + } + ToolStepOutcome::Paused => break, + ToolStepOutcome::Stop => break, + ToolStepOutcome::Continue => { + inject_priority_messages_if_any(gcx.clone(), session_arc.clone()).await; + } + } + } + + check_external_reload_pending(gcx.clone(), session_arc.clone()).await; + + { + let session = session_arc.lock().await; + session.abort_flag.store(false, Ordering::SeqCst); + session.queue_notify.notify_one(); + } + }) +} + +pub async fn run_llm_generation( + gcx: Arc>, + session_arc: Arc>, + thread: ThreadParams, + chat_id: String, + abort_flag: Arc, +) -> Result<(), String> { + let tools: Vec = + crate::tools::tools_list::get_tools_for_mode(gcx.clone(), &thread.mode, Some(&thread.model)) + .await + .into_iter() + .map(|tool| tool.tool_description()) + .collect(); + + info!("session generation: tools count = {}", tools.len()); + + let caps = crate::global_context::try_load_caps_quickly_if_not_present(gcx.clone(), 0) + .await + .map_err(|e| e.message)?; + let model_rec = crate::caps::resolve_chat_model(caps.clone(), &thread.model)?; + + let model_n_ctx = if model_rec.base.n_ctx > 0 { + model_rec.base.n_ctx + } else { + tokens().default_n_ctx + }; + let effective_n_ctx = match thread.context_tokens_cap { + Some(cap) if cap > 0 => cap.min(model_n_ctx), + _ => model_n_ctx, + }; + let tokenizer_arc = crate::tokens::cached_tokenizer(gcx.clone(), &model_rec.base).await?; + let t = HasTokenizerAndEot::new(tokenizer_arc); + + let meta = ChatMeta { + chat_id: chat_id.clone(), + chat_mode: thread.mode.clone(), + chat_remote: false, + current_config_file: String::new(), + context_tokens_cap: thread.context_tokens_cap, + include_project_info: thread.include_project_info, + request_attempt_id: Uuid::new_v4().to_string(), + }; + + let messages = { + let session = session_arc.lock().await; + session.messages.clone() + }; + let model_type_defaults = caps.user_defaults.defaults_for_model( + &model_rec.base.id, + &caps.defaults.chat_default_model, + &caps.defaults.chat_light_model, + &caps.defaults.chat_thinking_model, + ); + let mut parameters = SamplingParameters { + temperature: thread.temperature + .or(model_type_defaults.temperature), + frequency_penalty: thread.frequency_penalty, + max_new_tokens: thread.max_tokens + .or(model_type_defaults.max_new_tokens) + .unwrap_or(0), + boost_reasoning: thread.boost_reasoning + .unwrap_or_else(|| model_type_defaults.boost_reasoning.unwrap_or(false)), + reasoning_effort: thread.reasoning_effort.as_ref().and_then(|s| { + match s.as_str() { + "low" => Some(crate::call_validation::ReasoningEffort::Low), + "medium" => Some(crate::call_validation::ReasoningEffort::Medium), + 
"high" => Some(crate::call_validation::ReasoningEffort::High), + "xhigh" => Some(crate::call_validation::ReasoningEffort::XHigh), + "max" => Some(crate::call_validation::ReasoningEffort::Max), + _ => None, + } + }).or_else(|| { + model_type_defaults.reasoning_effort.as_ref().and_then(|s| { + match s.as_str() { + "low" => Some(crate::call_validation::ReasoningEffort::Low), + "medium" => Some(crate::call_validation::ReasoningEffort::Medium), + "high" => Some(crate::call_validation::ReasoningEffort::High), + "xhigh" => Some(crate::call_validation::ReasoningEffort::XHigh), + "max" => Some(crate::call_validation::ReasoningEffort::Max), + _ => None, + } + }) + }), + thinking_budget: thread.thinking_budget + .or(model_type_defaults.thinking_budget), + ..Default::default() + }; + + let ccx = AtCommandsContext::new( + gcx.clone(), + effective_n_ctx, + CHAT_TOP_N, + false, + messages.clone(), + chat_id.clone(), + thread.root_chat_id.clone(), + model_rec.base.id.clone(), + thread.task_meta.clone(), + ) + .await; + let ccx_arc = Arc::new(AMutex::new(ccx)); + + let options = ChatPrepareOptions { + prepend_system_prompt: false, + allow_at_commands: true, + allow_tool_prerun: true, + supports_tools: model_rec.supports_tools, + parallel_tool_calls: thread.parallel_tool_calls, + cache_control: CacheControl::Ephemeral, + ..Default::default() + }; + + let prepared = prepare_chat_passthrough( + gcx.clone(), + ccx_arc.clone(), + &t, + messages, + &thread, + &model_rec.base.id, + &thread.mode, + tools, + &meta, + &mut parameters, + &options, + ) + .await?; + + { + let mut session = session_arc.lock().await; + session.last_prompt_messages = prepared.limited_messages.clone(); + save_rag_results_to_session(&mut session, &prepared.rag_results); + } + + run_streaming_generation( + gcx, + session_arc, + prepared.llm_request, + &model_rec, + abort_flag, + ) + .await +} + +async fn run_streaming_generation( + gcx: Arc>, + session_arc: Arc>, + mut llm_request: LlmRequest, + model_rec: &crate::caps::ChatModelRecord, + abort_flag: Arc, +) -> Result<(), String> { + info!("session generation: model={}, messages={}", llm_request.model_id, llm_request.messages.len()); + let chat_id = { + let session = session_arc.lock().await; + session.chat_id.clone() + }; + + const TEMPERATURE_BUMP: f32 = 0.1; + const MAX_RETRY_TEMPERATURE: f32 = 0.5; + let user_specified_temp = llm_request.params.temperature; + let model_supports_temperature = model_rec.supports_temperature; + let can_retry_with_temp_bump = user_specified_temp.is_none() && model_supports_temperature; + let max_attempts = if can_retry_with_temp_bump { + (MAX_RETRY_TEMPERATURE / TEMPERATURE_BUMP).floor() as usize + 2 + } else { + 1 + }; + let mut attempt = 0; + + let result = loop { + attempt += 1; + if can_retry_with_temp_bump && attempt > 1 { + let retry_temp = TEMPERATURE_BUMP * (attempt - 2) as f32; + llm_request.params.temperature = Some(retry_temp.min(MAX_RETRY_TEMPERATURE)); + } + + let params = StreamRunParams { + llm_request: llm_request.clone(), + model_rec: model_rec.base.clone(), + chat_id: Some(chat_id.clone()), + abort_flag: Some(abort_flag.clone()), + supports_tools: model_rec.supports_tools, + supports_reasoning: model_rec.has_reasoning_support(), + reasoning_type: model_rec.reasoning_type_string(), + supports_temperature: model_rec.supports_temperature, + }; + + enum CollectorEventPayload { + DeltaOps(Vec), + Usage(ChatUsage), + } + + const EMITTER_QUEUE_CAPACITY: usize = 256; + let (tx, mut rx) = tokio::sync::mpsc::channel::(EMITTER_QUEUE_CAPACITY); + let 
overflow_usage = Arc::new(std::sync::Mutex::new(None::)); + let overflow_ops = Arc::new(std::sync::Mutex::new(Vec::::new())); + + struct SessionCollector { + tx: tokio::sync::mpsc::Sender, + overflow_usage: Arc>>, + overflow_ops: Arc>>, + } + + impl StreamCollector for SessionCollector { + fn on_delta_ops(&mut self, _choice_idx: usize, ops: Vec) { + match self.tx.try_send(CollectorEventPayload::DeltaOps(ops)) { + Ok(()) => {} + Err(tokio::sync::mpsc::error::TrySendError::Full(event)) => { + if let CollectorEventPayload::DeltaOps(ops) = event { + if let Ok(mut guard) = self.overflow_ops.lock() { + guard.extend(ops); + } + } + } + Err(tokio::sync::mpsc::error::TrySendError::Closed(_event)) => {} + } + } + + fn on_usage(&mut self, usage: &ChatUsage) { + let usage_clone = usage.clone(); + match self.tx.try_send(CollectorEventPayload::Usage(usage_clone.clone())) { + Ok(()) => {} + Err(tokio::sync::mpsc::error::TrySendError::Full(_event)) => { + if let Ok(mut guard) = self.overflow_usage.lock() { + *guard = Some(usage_clone); + } + } + Err(tokio::sync::mpsc::error::TrySendError::Closed(_event)) => {} + } + } + + fn on_finish(&mut self, _choice_idx: usize, _finish_reason: Option) {} + } + + let mut collector = SessionCollector { + tx, + overflow_usage: overflow_usage.clone(), + overflow_ops: overflow_ops.clone(), + }; + + let session_arc_emitter = session_arc.clone(); + let emitter_task = tokio::spawn(async move { + fn merge_events( + events: &mut Vec, + batched_ops: &mut Vec, + latest_usage: &mut Option, + ) { + for event in events.drain(..) { + match event { + CollectorEventPayload::DeltaOps(ops) => { + batched_ops.extend(ops); + } + CollectorEventPayload::Usage(usage) => { + *latest_usage = Some(usage); + } + } + } + } + + fn coalesce_text_ops(ops: Vec) -> Vec { + if ops.len() <= 1 { + return ops; + } + let mut out: Vec = Vec::with_capacity(ops.len()); + for op in ops { + match op { + DeltaOp::AppendContent { text } => { + if let Some(DeltaOp::AppendContent { text: ref mut prev }) = out.last_mut() { + prev.push_str(&text); + } else { + out.push(DeltaOp::AppendContent { text }); + } + } + DeltaOp::AppendReasoning { text } => { + if let Some(DeltaOp::AppendReasoning { text: ref mut prev }) = out.last_mut() { + prev.push_str(&text); + } else { + out.push(DeltaOp::AppendReasoning { text }); + } + } + other => out.push(other), + } + } + out + } + + fn split_utf8_chunks(text: &str, max_bytes: usize) -> Vec { + if text.len() <= max_bytes { + return vec![text.to_string()]; + } + let mut chunks = Vec::new(); + let mut start = 0usize; + while start < text.len() { + let mut end = (start + max_bytes).min(text.len()); + while end > start && !text.is_char_boundary(end) { + end -= 1; + } + if end == start { + end = text[start..] 
+ .char_indices() + .nth(1) + .map(|(i, _)| start + i) + .unwrap_or(text.len()); + } + chunks.push(text[start..end].to_string()); + start = end; + } + chunks + } + + fn split_large_text_ops(ops: Vec, max_text_bytes: usize) -> Vec { + let mut out = Vec::new(); + for op in ops { + match op { + DeltaOp::AppendContent { text } => { + for chunk in split_utf8_chunks(&text, max_text_bytes) { + out.push(DeltaOp::AppendContent { text: chunk }); + } + } + DeltaOp::AppendReasoning { text } => { + for chunk in split_utf8_chunks(&text, max_text_bytes) { + out.push(DeltaOp::AppendReasoning { text: chunk }); + } + } + other => out.push(other), + } + } + out + } + + const MAX_BATCH_EVENTS: usize = 64; + const MAX_DELTA_OPS_PER_EMIT: usize = 128; + const MAX_DELTA_TEXT_BYTES: usize = 64 * 1024; + let mut pending = Vec::::new(); + + while let Some(first_event) = rx.recv().await { + pending.push(first_event); + + while pending.len() < MAX_BATCH_EVENTS { + match rx.try_recv() { + Ok(event) => pending.push(event), + Err(tokio::sync::mpsc::error::TryRecvError::Empty) => break, + Err(tokio::sync::mpsc::error::TryRecvError::Disconnected) => break, + } + } + + let mut batched_ops = Vec::new(); + let mut latest_usage: Option = None; + merge_events(&mut pending, &mut batched_ops, &mut latest_usage); + + if let Ok(mut guard) = overflow_ops.lock() { + if !guard.is_empty() { + let mut drained = std::mem::take(&mut *guard); + drained.append(&mut batched_ops); + batched_ops = drained; + } + } + if let Ok(mut guard) = overflow_usage.lock() { + if let Some(usage) = guard.take() { + latest_usage = Some(usage); + } + } + + let batched_ops = coalesce_text_ops(batched_ops); + let batched_ops = split_large_text_ops(batched_ops, MAX_DELTA_TEXT_BYTES); + + let mut session = session_arc_emitter.lock().await; + if !batched_ops.is_empty() { + for chunk in batched_ops.chunks(MAX_DELTA_OPS_PER_EMIT) { + session.emit_stream_delta(chunk.to_vec()); + } + } + if let Some(usage) = latest_usage { + session.draft_usage = Some(usage); + } + } + }); + + let results = run_llm_stream(gcx.clone(), params, &mut collector).await; + drop(collector); + let _ = emitter_task.await; + let results = results?; + + let mut result = results.into_iter().next().unwrap_or_default(); + + if is_result_empty(&result) { + if attempt < max_attempts && can_retry_with_temp_bump { + let current_temp_display = if attempt == 1 { + "default".to_string() + } else { + format!("{:.1}", TEMPERATURE_BUMP * (attempt - 2) as f32) + }; + let next_temp = (TEMPERATURE_BUMP * (attempt - 1) as f32).min(MAX_RETRY_TEMPERATURE); + warn!( + "Empty assistant response at T={}, retrying with T={:.1} (attempt {}/{})", + current_temp_display, next_temp, attempt, max_attempts + ); + { + let mut session = session_arc.lock().await; + if let Some(ref mut draft) = session.draft_message { + draft.content = ChatContent::SimpleText(String::new()); + draft.tool_calls = None; + draft.reasoning_content = None; + draft.thinking_blocks = None; + draft.citations = Vec::new(); + draft.server_content_blocks = Vec::new(); + draft.extra = serde_json::Map::new(); + } + session.draft_usage = None; + } + continue; + } else { + let effective_temp = llm_request.params.temperature.unwrap_or(0.0); + return Err(format!( + "Empty assistant response after {} attempts (T={:.1})", + max_attempts, effective_temp + )); + } + } + + // --- Tool call recovery --- + // GPT-5 Codex models occasionally leak tool calls into text content instead of + // emitting structured function_call events. Detect and recover them. 
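Before the recovery steps below, it helps to see the shape of the `multi_tool_use.parallel` wrapper that step 1 unwraps. The field names in this sketch (`tool_uses`, `recipient_name`, `parameters`) are assumptions about that wrapper, not taken from `tool_call_recovery`:

```rust
use std::collections::HashSet;
use serde_json::{json, Value};

// Hypothetical illustration only: flatten a `multi_tool_use.parallel` wrapper
// into plain (tool_name, json_arguments) pairs, keeping only known tool names.
fn flatten_parallel_wrapper(args: &Value, allowed: &HashSet<String>) -> Vec<(String, String)> {
    let mut out = Vec::new();
    if let Some(uses) = args.get("tool_uses").and_then(|v| v.as_array()) {
        for u in uses {
            let name = u.get("recipient_name")
                .and_then(|v| v.as_str())
                .unwrap_or("")
                .trim_start_matches("functions.") // strip the namespace prefix, if present
                .to_string();
            if !allowed.contains(&name) {
                continue;
            }
            let params = u.get("parameters").cloned().unwrap_or_else(|| json!({}));
            out.push((name, params.to_string()));
        }
    }
    out
}

fn main() {
    let allowed: HashSet<String> = ["cat".to_string(), "search".to_string()].into();
    let wrapper_args = json!({
        "tool_uses": [
            { "recipient_name": "functions.cat", "parameters": { "path": "src/main.rs" } },
            { "recipient_name": "functions.search", "parameters": { "query": "cache guard" } }
        ]
    });
    for (name, args) in flatten_parallel_wrapper(&wrapper_args, &allowed) {
        println!("{name}: {args}");
    }
}
```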
+ let allowed_tools = tool_call_recovery::allowed_tool_names(&llm_request.tools); + + // 1. Unwrap multi_tool_use.parallel wrappers in structured tool_calls + if !result.tool_calls_raw.is_empty() { + result.tool_calls_raw = tool_call_recovery::unwrap_multi_tool_use_parallel( + &result.tool_calls_raw, + &allowed_tools, + ); + } + + // 2. Recover tool calls from garbled ChatML content (when no structured calls exist) + if result.tool_calls_raw.is_empty() && !allowed_tools.is_empty() { + if let Some((cleaned_content, recovered_calls)) = + tool_call_recovery::recover_tool_calls_from_chatml_content( + &result.content, + &allowed_tools, + ) + { + warn!( + "tool_call_recovery: recovered {} tool call(s) from garbled content", + recovered_calls.len() + ); + result.content = cleaned_content; + result.tool_calls_raw = recovered_calls; + } + } + + if !result.tool_calls_raw.is_empty() { + let parsed: Vec<_> = result.tool_calls_raw.iter().filter_map(|tc| normalize_tool_call(tc)).collect(); + if parsed.is_empty() { + let has_content = !result.content.trim().is_empty() + || !result.reasoning.trim().is_empty() + || !result.server_content_blocks.is_empty() + || !result.citations.is_empty(); + tracing::warn!( + "All {} tool calls unparsable: {:?}", + result.tool_calls_raw.len(), + result.tool_calls_raw, + ); + if !has_content { + return Err("Model returned tool_calls but none were parsable".to_string()); + } + // Has useful content — discard unparsable tool calls and continue + result.tool_calls_raw.clear(); + } else if parsed.len() < result.tool_calls_raw.len() { + let dropped: Vec<_> = result.tool_calls_raw.iter() + .filter(|tc| normalize_tool_call(tc).is_none()) + .collect(); + tracing::warn!( + "Dropped {}/{} tool calls during normalization: {:?}", + dropped.len(), + result.tool_calls_raw.len(), + dropped, + ); + } + } + + break result; + }; + + let (model_id, usage_for_pricing) = { + let session = session_arc.lock().await; + (session.thread.model.clone(), session.draft_usage.clone()) + }; + let metering_usd = if let Some(ref usage) = usage_for_pricing { + if let Some(pricing) = get_model_pricing(&gcx, &model_id).await { + crate::providers::pricing::compute_cost(usage, &pricing) + } else { + None + } + } else { + None + }; + + { + let mut session = session_arc.lock().await; + if let Some(ref mut draft) = session.draft_message { + draft.content = ChatContent::SimpleText(result.content); + + if !result.tool_calls_raw.is_empty() { + info!( + "Parsing {} accumulated tool calls", + result.tool_calls_raw.len() + ); + let parsed: Vec<_> = result + .tool_calls_raw + .iter() + .filter_map(|tc| normalize_tool_call(tc)) + .collect(); + info!("Successfully parsed {} tool calls", parsed.len()); + if !parsed.is_empty() { + draft.tool_calls = Some(parsed); + } + } + + if !result.reasoning.is_empty() { + draft.reasoning_content = Some(result.reasoning); + } + if !result.thinking_blocks.is_empty() { + draft.thinking_blocks = Some(result.thinking_blocks); + } + if !result.citations.is_empty() { + draft.citations = result.citations; + } + if !result.server_content_blocks.is_empty() { + draft.server_content_blocks = result.server_content_blocks; + } + if !result.extra.is_empty() { + draft.extra = result.extra; + } + } + + // Store previous_response_id for stateful multi-turn on Platform API only. + // ChatGPT backend doesn't support previous_response_id, so don't store it — + // otherwise prepare_chat_passthrough activates tail-only mode and the server + // receives function_call_output without matching function_call items. 
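The storage rule in the comment above can be restated as a small predicate. This is a sketch with a local stand-in enum and illustrative endpoint URLs; the real check below uses `crate::llm::WireFormat` and the model's configured endpoint:

```rust
// Sketch only: stand-in enum, not crate::llm::WireFormat.
#[derive(PartialEq)]
enum WireFormatSketch {
    OpenaiChatCompletions,
    OpenaiResponses,
}

fn should_store_previous_response_id(wire_format: &WireFormatSketch, endpoint: &str) -> bool {
    // Response chaining only applies to the Responses API, and the ChatGPT
    // backend variant of that API does not accept previous_response_id.
    *wire_format == WireFormatSketch::OpenaiResponses
        && !endpoint.contains("chatgpt.com/backend-api")
}

fn main() {
    // Endpoint URLs are illustrative.
    assert!(should_store_previous_response_id(
        &WireFormatSketch::OpenaiResponses,
        "https://api.openai.com/v1/responses",
    ));
    assert!(!should_store_previous_response_id(
        &WireFormatSketch::OpenaiResponses,
        "https://chatgpt.com/backend-api/responses",
    ));
    assert!(!should_store_previous_response_id(
        &WireFormatSketch::OpenaiChatCompletions,
        "https://api.openai.com/v1/chat/completions",
    ));
}
```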
+ let is_chatgpt_backend = model_rec.base.endpoint.contains("chatgpt.com/backend-api"); + if model_rec.base.wire_format == crate::llm::WireFormat::OpenaiResponses && !is_chatgpt_backend { + if let Some(resp_id) = session + .draft_message + .as_ref() + .and_then(|m| m.extra.get("openai_response_id")) + .and_then(|v| v.as_str()) + { + if session.thread.previous_response_id.as_deref() != Some(resp_id) { + session.thread.previous_response_id = Some(resp_id.to_string()); + session.increment_version(); + } + } + } + + if let Some(ref mut usage) = session.draft_usage { + usage.metering_usd = metering_usd; + } + + session.finish_stream(result.finish_reason); + } + + Ok(()) +} + +async fn get_model_pricing( + gcx: &Arc>, + model_id: &str, +) -> Option { + let parts: Vec<&str> = model_id.splitn(2, '/').collect(); + if parts.len() != 2 { + return None; + } + let provider_name = parts[0]; + let model_name = parts[1]; + + let gcx_locked = gcx.read().await; + let registry = gcx_locked.providers.read().await; + + if let Some(provider) = registry.get(provider_name) { + return provider.model_pricing(model_name); + } + + None +} + +fn is_result_empty(result: &ChoiceFinal) -> bool { + result.content.trim().is_empty() + && result.tool_calls_raw.is_empty() + && result.reasoning.trim().is_empty() + && result.thinking_blocks.is_empty() + && result.citations.is_empty() + && result.server_content_blocks.is_empty() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::call_validation::{ChatToolCall, ChatToolFunction}; + + fn make_user_msg(content: &str) -> ChatMessage { + ChatMessage { + role: "user".to_string(), + content: ChatContent::SimpleText(content.to_string()), + ..Default::default() + } + } + + fn make_assistant_msg(content: &str) -> ChatMessage { + ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText(content.to_string()), + ..Default::default() + } + } + + fn make_assistant_with_tool_call(tool_call_id: &str, tool_name: &str) -> ChatMessage { + ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("".to_string()), + tool_calls: Some(vec![ChatToolCall { + id: tool_call_id.to_string(), + index: Some(0), + function: ChatToolFunction { + name: tool_name.to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }]), + ..Default::default() + } + } + + fn make_tool_msg(tool_call_id: &str, content: &str) -> ChatMessage { + ChatMessage { + role: "tool".to_string(), + tool_call_id: tool_call_id.to_string(), + content: ChatContent::SimpleText(content.to_string()), + ..Default::default() + } + } + + fn make_context_file_msg() -> ChatMessage { + ChatMessage { + role: "context_file".to_string(), + content: ChatContent::SimpleText("file content".to_string()), + ..Default::default() + } + } + + #[test] + fn test_tail_needs_assistant_ends_with_assistant_no_tools() { + let messages = vec![make_user_msg("hello"), make_assistant_msg("response")]; + assert!(!tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_ends_with_user() { + let messages = vec![make_user_msg("hello")]; + assert!(tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_ends_with_tool_from_client() { + let messages = vec![ + make_user_msg("hello"), + make_assistant_with_tool_call("call_123", "cat"), + make_tool_msg("call_123", "file content"), + ]; + assert!(tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_ends_with_tool_from_server() { + let messages = vec![ 
+ make_user_msg("hello"), + make_assistant_with_tool_call("srvtoolu_123", "web_search"), + make_tool_msg("srvtoolu_123", "search results"), + ]; + assert!(!tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_empty_assistant_discarded() { + let messages = vec![ + make_user_msg("hello"), + make_assistant_with_tool_call("call_123", "cat"), + make_tool_msg("call_123", "file content"), + ]; + assert!(tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_context_file_after_tool() { + let messages = vec![ + make_user_msg("hello"), + make_assistant_with_tool_call("call_123", "cat"), + make_tool_msg("call_123", "file content"), + make_context_file_msg(), + ]; + assert!(tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_multiple_tool_calls_mixed() { + let messages = vec![ + make_user_msg("hello"), + ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("".to_string()), + tool_calls: Some(vec![ + ChatToolCall { + id: "call_123".to_string(), + index: Some(0), + function: ChatToolFunction { + name: "cat".to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }, + ChatToolCall { + id: "srvtoolu_456".to_string(), + index: Some(1), + function: ChatToolFunction { + name: "web_search".to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }, + ]), + ..Default::default() + }, + make_tool_msg("call_123", "file content"), + make_tool_msg("srvtoolu_456", "search results"), + ]; + assert!(tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_only_server_tools() { + let messages = vec![ + make_user_msg("hello"), + ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("".to_string()), + tool_calls: Some(vec![ + ChatToolCall { + id: "srvtoolu_123".to_string(), + index: Some(0), + function: ChatToolFunction { + name: "web_search".to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }, + ChatToolCall { + id: "srvtoolu_456".to_string(), + index: Some(1), + function: ChatToolFunction { + name: "web_search".to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }, + ]), + ..Default::default() + }, + make_tool_msg("srvtoolu_123", "search results 1"), + make_tool_msg("srvtoolu_456", "search results 2"), + ]; + assert!(!tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_empty_messages() { + let messages: Vec = vec![]; + assert!(!tail_needs_assistant(&messages)); + } + + #[test] + fn test_tail_needs_assistant_assistant_with_empty_tool_calls() { + let messages = vec![ + make_user_msg("hello"), + ChatMessage { + role: "assistant".to_string(), + content: ChatContent::SimpleText("response".to_string()), + tool_calls: Some(vec![]), + ..Default::default() + }, + ]; + assert!(!tail_needs_assistant(&messages)); + } +} diff --git a/refact-agent/engine/src/chat/handlers.rs b/refact-agent/engine/src/chat/handlers.rs new file mode 100644 index 000000000..3c595d83f --- /dev/null +++ b/refact-agent/engine/src/chat/handlers.rs @@ -0,0 +1,370 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::sync::atomic::Ordering; +use axum::extract::Path; +use axum::http::{Response, StatusCode}; +use axum::Extension; +use hyper::Body; +use tokio::sync::{broadcast, RwLock as ARwLock}; + +use 
crate::custom_error::ScratchError;
+use crate::global_context::GlobalContext;
+
+use super::types::*;
+use super::session::get_or_create_session_with_trajectory;
+use super::content::validate_content_with_attachments;
+use super::queue::process_command_queue;
+use super::trajectory_ops::sanitize_messages_for_model_switch;
+use super::trajectories::validate_trajectory_id;
+use crate::yaml_configs::customization_registry::{get_mode_config, map_legacy_mode_to_id};
+
+pub async fn handle_v1_chat_subscribe(
+    Extension(gcx): Extension<Arc<ARwLock<GlobalContext>>>,
+    axum::extract::Query(params): axum::extract::Query<HashMap<String, String>>,
+) -> Result<Response<Body>, ScratchError> {
+    let chat_id = params
+        .get("chat_id")
+        .ok_or_else(|| ScratchError::new(StatusCode::BAD_REQUEST, "chat_id required".to_string()))?
+        .clone();
+    validate_trajectory_id(&chat_id)?;
+
+    let sessions = {
+        let gcx_locked = gcx.read().await;
+        gcx_locked.chat_sessions.clone()
+    };
+
+    let session_arc = get_or_create_session_with_trajectory(gcx.clone(), &sessions, &chat_id).await;
+    let session = session_arc.lock().await;
+    let snapshot = session.snapshot();
+    let mut rx = session.subscribe();
+    let initial_seq = session.event_seq;
+    drop(session);
+
+    let initial_envelope = EventEnvelope {
+        chat_id: chat_id.clone(),
+        seq: initial_seq,
+        event: snapshot,
+    };
+
+    let session_for_stream = session_arc.clone();
+    let chat_id_for_stream = chat_id.clone();
+
+    let stream = async_stream::stream! {
+        let json = serde_json::to_string(&initial_envelope).unwrap_or_default();
+        yield Ok::<_, std::convert::Infallible>(format!("data: {}\n\n", json));
+
+        let mut heartbeat_interval = tokio::time::interval(std::time::Duration::from_secs(15));
+        heartbeat_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
+
+        loop {
+            tokio::select!
{ + result = rx.recv() => { + match result { + Ok(envelope) => { + let json = serde_json::to_string(&envelope).unwrap_or_default(); + yield Ok::<_, std::convert::Infallible>(format!("data: {}\n\n", json)); + } + Err(broadcast::error::RecvError::Lagged(skipped)) => { + tracing::info!("SSE subscriber lagged, skipped {} events, sending fresh snapshot", skipped); + let session = session_for_stream.lock().await; + let recovery_envelope = EventEnvelope { + chat_id: chat_id_for_stream.clone(), + seq: session.event_seq, + event: session.snapshot(), + }; + drop(session); + let json = serde_json::to_string(&recovery_envelope).unwrap_or_default(); + yield Ok::<_, std::convert::Infallible>(format!("data: {}\n\n", json)); + } + Err(broadcast::error::RecvError::Closed) => break, + } + } + _ = heartbeat_interval.tick() => { + if session_for_stream.lock().await.closed { + break; + } + yield Ok::<_, std::convert::Infallible>(format!(": hb {}\n\n", chrono::Utc::now().timestamp())); + } + } + } + }; + + Ok(Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "text/event-stream") + .header("Cache-Control", "no-cache") + .header("Connection", "keep-alive") + .body(Body::wrap_stream(stream)) + .unwrap()) +} + +pub async fn handle_v1_chat_command( + Extension(gcx): Extension>>, + Path(chat_id): Path, + body_bytes: hyper::body::Bytes, +) -> Result, ScratchError> { + validate_trajectory_id(&chat_id)?; + + let request: CommandRequest = serde_json::from_slice(&body_bytes) + .map_err(|e| ScratchError::new(StatusCode::BAD_REQUEST, format!("Invalid JSON: {}", e)))?; + + let sessions = { + let gcx_locked = gcx.read().await; + gcx_locked.chat_sessions.clone() + }; + + let session_arc = get_or_create_session_with_trajectory(gcx.clone(), &sessions, &chat_id).await; + let mut session = session_arc.lock().await; + + if session.is_duplicate_request(&request.client_request_id) { + session.emit(ChatEvent::Ack { + client_request_id: request.client_request_id.clone(), + accepted: true, + result: Some(serde_json::json!({"duplicate": true})), + }); + return Ok(Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "application/json") + .body(Body::from(r#"{"status":"duplicate"}"#)) + .unwrap()); + } + + if matches!(request.command, ChatCommand::Abort {}) { + session.abort_stream(); + session.emit(ChatEvent::Ack { + client_request_id: request.client_request_id, + accepted: true, + result: Some(serde_json::json!({"aborted": true})), + }); + return Ok(Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "application/json") + .body(Body::from(r#"{"status":"aborted"}"#)) + .unwrap()); + } + + if let ChatCommand::SetParams { ref patch } = request.command { + let old_model = session.thread.model.clone(); + let old_mode = session.thread.mode.clone(); + let (mut changed, sanitized_patch) = + super::queue::apply_setparams_patch(&mut session.thread, patch); + + let mode_in_patch = patch.get("mode").and_then(|v| v.as_str()); + if let Some(mode_str) = mode_in_patch { + let normalized_mode = map_legacy_mode_to_id(mode_str); + if session.thread.mode != normalized_mode { + session.thread.mode = normalized_mode.to_string(); + changed = true; + } + } + + let mode_changed = session.thread.mode != old_mode; + if mode_changed { + let model_id = if session.thread.model.is_empty() { None } else { Some(session.thread.model.as_str()) }; + if let Some(mode_config) = get_mode_config(gcx.clone(), &session.thread.mode, model_id).await { + let defaults = &mode_config.thread_defaults; + if let Some(v) = 
defaults.include_project_info { + if session.thread.include_project_info != v { + session.thread.include_project_info = v; + changed = true; + } + } + if let Some(v) = defaults.checkpoints_enabled { + if session.thread.checkpoints_enabled != v { + session.thread.checkpoints_enabled = v; + changed = true; + } + } + if let Some(v) = defaults.auto_approve_editing_tools { + if session.thread.auto_approve_editing_tools != v { + session.thread.auto_approve_editing_tools = v; + changed = true; + } + } + if let Some(v) = defaults.auto_approve_dangerous_commands { + if session.thread.auto_approve_dangerous_commands != v { + session.thread.auto_approve_dangerous_commands = v; + changed = true; + } + } + } + } + + if session.thread.model != old_model { + sanitize_messages_for_model_switch(&mut session.messages); + } + let title_in_patch = patch.get("title").and_then(|v| v.as_str()); + let is_gen_in_patch = patch.get("is_title_generated").and_then(|v| v.as_bool()); + if let Some(title) = title_in_patch { + let is_generated = is_gen_in_patch.unwrap_or(false); + session.set_title(title.to_string(), is_generated); + } else if let Some(is_gen) = is_gen_in_patch { + if session.thread.is_title_generated != is_gen { + session.thread.is_title_generated = is_gen; + let title = session.thread.title.clone(); + session.set_title(title, is_gen); + } + } + + let mut patch_for_chat_sse = sanitized_patch; + if let Some(obj) = patch_for_chat_sse.as_object_mut() { + obj.remove("title"); + obj.remove("is_title_generated"); + if mode_changed { + obj.insert("mode".to_string(), serde_json::json!(session.thread.mode)); + obj.insert("include_project_info".to_string(), serde_json::json!(session.thread.include_project_info)); + obj.insert("checkpoints_enabled".to_string(), serde_json::json!(session.thread.checkpoints_enabled)); + obj.insert("auto_approve_editing_tools".to_string(), serde_json::json!(session.thread.auto_approve_editing_tools)); + obj.insert("auto_approve_dangerous_commands".to_string(), serde_json::json!(session.thread.auto_approve_dangerous_commands)); + } + } + session.emit(ChatEvent::ThreadUpdated { + params: patch_for_chat_sse, + }); + if changed { + session.increment_version(); + session.touch(); + } + session.emit(ChatEvent::Ack { + client_request_id: request.client_request_id, + accepted: true, + result: Some(serde_json::json!({"applied": true})), + }); + drop(session); + if changed { + super::trajectories::maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + } + return Ok(Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "application/json") + .body(Body::from(r#"{"status":"applied"}"#)) + .unwrap()); + } + + let is_critical = (session.runtime.state == SessionState::Paused + && matches!( + request.command, + ChatCommand::ToolDecision { .. } | ChatCommand::ToolDecisions { .. } + )) + || (session.runtime.state == SessionState::WaitingIde + && matches!(request.command, ChatCommand::IdeToolResult { .. 
})); + + if session.command_queue.len() >= max_queue_size() && !is_critical { + session.emit(ChatEvent::Ack { + client_request_id: request.client_request_id, + accepted: false, + result: Some(serde_json::json!({"error": "queue full"})), + }); + return Ok(Response::builder() + .status(StatusCode::TOO_MANY_REQUESTS) + .header("Content-Type", "application/json") + .body(Body::from(r#"{"status":"queue_full"}"#)) + .unwrap()); + } + + let validation_error = match &request.command { + ChatCommand::UserMessage { + content, + attachments, + } => validate_content_with_attachments(content, attachments).err(), + ChatCommand::RetryFromIndex { + content, + attachments, + .. + } => validate_content_with_attachments(content, attachments).err(), + ChatCommand::UpdateMessage { + content, + attachments, + .. + } => validate_content_with_attachments(content, attachments).err(), + _ => None, + }; + + if let Some(error) = validation_error { + session.emit(ChatEvent::Ack { + client_request_id: request.client_request_id, + accepted: false, + result: Some(serde_json::json!({"error": error})), + }); + let body = serde_json::to_string(&serde_json::json!({ + "status": "invalid_content", + "error": error + })).unwrap_or_else(|_| r#"{"status":"invalid_content"}"#.to_string()); + return Ok(Response::builder() + .status(StatusCode::BAD_REQUEST) + .header("Content-Type", "application/json") + .body(Body::from(body)) + .unwrap()); + } + + if request.priority { + let insert_pos = session + .command_queue + .iter() + .position(|r| !r.priority) + .unwrap_or(session.command_queue.len()); + session.command_queue.insert(insert_pos, request.clone()); + } else { + session.command_queue.push_back(request.clone()); + } + session.touch(); + session.emit_queue_update(); + + session.emit(ChatEvent::Ack { + client_request_id: request.client_request_id, + accepted: true, + result: Some(serde_json::json!({"queued": true})), + }); + + let queue_notify = session.queue_notify.clone(); + let processor_running = session.queue_processor_running.clone(); + drop(session); + + if !processor_running.swap(true, Ordering::SeqCst) { + tokio::spawn(process_command_queue(gcx, session_arc, processor_running)); + } else { + queue_notify.notify_one(); + } + + Ok(Response::builder() + .status(StatusCode::ACCEPTED) + .header("Content-Type", "application/json") + .body(Body::from(r#"{"status":"accepted"}"#)) + .unwrap()) +} + +pub async fn handle_v1_chat_cancel_queued( + Extension(gcx): Extension>>, + Path((chat_id, client_request_id)): Path<(String, String)>, +) -> Result, ScratchError> { + validate_trajectory_id(&chat_id)?; + + let sessions = { + let gcx_locked = gcx.read().await; + gcx_locked.chat_sessions.clone() + }; + + let session_arc = get_or_create_session_with_trajectory(gcx.clone(), &sessions, &chat_id).await; + let mut session = session_arc.lock().await; + + let initial_len = session.command_queue.len(); + session + .command_queue + .retain(|r| r.client_request_id != client_request_id); + + if session.command_queue.len() < initial_len { + session.touch(); + session.emit_queue_update(); + Ok(Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "application/json") + .body(Body::from(r#"{"status":"cancelled"}"#)) + .unwrap()) + } else { + Ok(Response::builder() + .status(StatusCode::NOT_FOUND) + .header("Content-Type", "application/json") + .body(Body::from(r#"{"status":"not_found"}"#)) + .unwrap()) + } +} diff --git a/refact-agent/engine/src/chat/history_limit.rs b/refact-agent/engine/src/chat/history_limit.rs new file mode 
100644 index 000000000..9e4d363e7 --- /dev/null +++ b/refact-agent/engine/src/chat/history_limit.rs @@ -0,0 +1,585 @@ +use std::collections::{HashMap, HashSet}; +use serde_json::Value; +use serde::{Serialize, Deserialize}; +use crate::call_validation::{ChatMessage, ChatContent, ContextFile, SamplingParameters}; +use crate::nicer_logs::first_n_chars; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum CompressionStrength { + Absent, + Low, + Medium, + High, +} + +pub(crate) fn remove_invalid_tool_calls_and_tool_calls_results(messages: &mut Vec) { + let tool_call_ids: HashSet<_> = messages + .iter() + .filter(|m| !m.tool_call_id.is_empty()) + .map(|m| &m.tool_call_id) + .cloned() + .collect(); + messages.retain(|m| { + if let Some(tool_calls) = &m.tool_calls { + let should_retain = tool_calls.iter().all(|tc| tool_call_ids.contains(&tc.id)); + if !should_retain { + tracing::warn!( + "removing assistant message with unanswered tool tool_calls: {:?}", + tool_calls + ); + } + should_retain + } else { + true + } + }); + + let tool_call_ids: HashSet<_> = messages + .iter() + .filter_map(|x| x.tool_calls.clone()) + .flatten() + .map(|x| x.id) + .collect(); + messages.retain(|m| { + let is_tool_result = m.role == "tool" || m.role == "diff"; + if is_tool_result && !m.tool_call_id.is_empty() && !tool_call_ids.contains(&m.tool_call_id) + { + tracing::warn!("removing tool result with no tool_call: {:?}", m); + false + } else { + true + } + }); + + // Remove duplicate tool results - keep only the last occurrence of each tool_call_id + // Anthropic API requires exactly one tool_result per tool_use + // For file edit operations, "diff" role typically comes after "tool" and contains cleaner output + // Only applies to actual tool results (role == "tool" or "diff"), not context_file markers + let mut last_occurrence: HashMap = HashMap::new(); + for (i, m) in messages.iter().enumerate() { + let is_tool_result = m.role == "tool" || m.role == "diff"; + if is_tool_result && !m.tool_call_id.is_empty() { + last_occurrence.insert(m.tool_call_id.clone(), i); + } + } + let indices_to_keep: HashSet = last_occurrence.values().cloned().collect(); + let mut current_idx = 0usize; + messages.retain(|m| { + let idx = current_idx; + current_idx += 1; + let is_tool_result = m.role == "tool" || m.role == "diff"; + if m.tool_call_id.is_empty() || !is_tool_result { + true + } else if indices_to_keep.contains(&idx) { + true + } else { + tracing::warn!( + "removing duplicate tool result (role={}) for tool_call_id: {}", + m.role, + m.tool_call_id + ); + false + } + }); +} + +/// Determines if two file contents have a duplication relationship (one contains the other). +/// Returns true if either content is substantially contained in the other. 
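+/// A minimal illustration (mirrors `test_is_content_duplicate_substring_containment` below):
+/// `is_content_duplicate("line2\nline3", 2, 3, "line1\nline2\nline3\nline4", 1, 4)` returns true
+/// because the line ranges overlap and the smaller content is fully contained in the larger one;
+/// non-overlapping ranges always return false.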
+pub(crate) fn is_content_duplicate(
+    current_content: &str,
+    current_line1: usize,
+    current_line2: usize,
+    first_content: &str,
+    first_line1: usize,
+    first_line2: usize,
+) -> bool {
+    let lines_overlap = first_line1 <= current_line2 && first_line2 >= current_line1;
+    // If line ranges don't overlap at all, it's definitely not a duplicate
+    if !lines_overlap {
+        return false;
+    }
+    // Empty contents are never considered duplicates
+    if current_content.is_empty() || first_content.is_empty() {
+        return false;
+    }
+    // Check if either content is entirely contained in the other (symmetric check)
+    if first_content.contains(current_content) || current_content.contains(first_content) {
+        return true;
+    }
+    // Check for substantial line overlap (either direction)
+    let first_lines: HashSet<&str> = first_content
+        .lines()
+        .filter(|x| !x.starts_with("..."))
+        .collect();
+    let current_lines: HashSet<&str> = current_content
+        .lines()
+        .filter(|x| !x.starts_with("..."))
+        .collect();
+    let intersect_count = first_lines.intersection(&current_lines).count();
+
+    // Either all of current's lines are in first, OR all of first's lines are in current
+    let current_in_first = !current_lines.is_empty() && intersect_count >= current_lines.len();
+    let first_in_current = !first_lines.is_empty() && intersect_count >= first_lines.len();
+
+    current_in_first || first_in_current
+}
+
+/// Stage 0: Compress duplicate ContextFiles based on content comparison - keeping the LARGEST occurrence
+pub(crate) fn compress_duplicate_context_files(
+    messages: &mut Vec<ChatMessage>,
+) -> Result<(usize, Vec<bool>), String> {
+    #[derive(Debug, Clone)]
+    struct ContextFileInfo {
+        msg_idx: usize,
+        cf_idx: usize,
+        file_name: String,
+        content: String,
+        line1: usize,
+        line2: usize,
+        content_len: usize,
+        is_compressed: bool,
+    }
+
+    // First pass: collect information about all context files
+    let mut preserve_messages = vec![false; messages.len()];
+    let mut all_files: Vec<ContextFileInfo> = Vec::new();
+    for (msg_idx, msg) in messages.iter().enumerate() {
+        if msg.role != "context_file" {
+            continue;
+        }
+        let context_files: Vec<ContextFile> = match &msg.content {
+            ChatContent::ContextFiles(files) => files.clone(),
+            ChatContent::SimpleText(text) => match serde_json::from_str(text) {
+                Ok(v) => v,
+                Err(e) => {
+                    tracing::warn!(
+                        "Stage 0: Failed to parse ContextFile JSON at index {}: {}. Skipping.",
+                        msg_idx,
+                        e
+                    );
+                    continue;
+                }
+            },
+            _ => {
+                tracing::warn!(
+                    "Stage 0: Unexpected content type for context_file at index {}.
Skipping.", + msg_idx + ); + continue; + } + }; + for (cf_idx, cf) in context_files.iter().enumerate() { + all_files.push(ContextFileInfo { + msg_idx, + cf_idx, + file_name: cf.file_name.clone(), + content: cf.file_content.clone(), + line1: cf.line1, + line2: cf.line2, + content_len: cf.file_content.len(), + is_compressed: false, + }); + } + } + + // Group occurrences by file name + let mut files_by_name: HashMap> = HashMap::new(); + for (i, file) in all_files.iter().enumerate() { + files_by_name + .entry(file.file_name.clone()) + .or_insert_with(Vec::new) + .push(i); + } + + // Process each file's occurrences - keep the LARGEST one (prefer earlier if tied) + for (filename, indices) in &files_by_name { + if indices.len() <= 1 { + continue; + } + + // Find the index with the largest content; if tied, prefer earlier message (smaller msg_idx) + let best_idx = *indices + .iter() + .max_by(|&&a, &&b| { + let size_cmp = all_files[a].content_len.cmp(&all_files[b].content_len); + if size_cmp == std::cmp::Ordering::Equal { + // When sizes equal, prefer EARLIER occurrence (smaller msg_idx) + all_files[b].msg_idx.cmp(&all_files[a].msg_idx) + } else { + size_cmp + } + }) + .unwrap(); + let best_msg_idx = all_files[best_idx].msg_idx; + preserve_messages[best_msg_idx] = true; + + tracing::info!( + "Stage 0: File {} - preserving best occurrence at message index {} ({} bytes)", + filename, + best_msg_idx, + all_files[best_idx].content_len + ); + + // Mark all other occurrences that are duplicates (subsets) of the best one for compression + for &curr_idx in indices { + if curr_idx == best_idx { + continue; + } + let current_msg_idx = all_files[curr_idx].msg_idx; + let content_is_duplicate = is_content_duplicate( + &all_files[curr_idx].content, + all_files[curr_idx].line1, + all_files[curr_idx].line2, + &all_files[best_idx].content, + all_files[best_idx].line1, + all_files[best_idx].line2, + ); + if content_is_duplicate { + all_files[curr_idx].is_compressed = true; + tracing::info!("Stage 0: Marking for compression - duplicate/subset of file {} at message index {} ({} bytes)", + filename, current_msg_idx, all_files[curr_idx].content_len); + } else { + tracing::info!("Stage 0: Not compressing - unique content of file {} at message index {} (non-overlapping)", + filename, current_msg_idx); + } + } + } + + // Apply compressions to messages + let mut compressed_count = 0; + let mut modified_messages: HashSet = HashSet::new(); + for file in &all_files { + if file.is_compressed && !modified_messages.contains(&file.msg_idx) { + let context_files: Vec = match &messages[file.msg_idx].content { + ChatContent::ContextFiles(files) => files.clone(), + ChatContent::SimpleText(text) => serde_json::from_str(text).unwrap_or_default(), + _ => vec![], + }; + + let mut remaining_files = Vec::new(); + let mut compressed_files = Vec::new(); + + for (cf_idx, cf) in context_files.iter().enumerate() { + if all_files + .iter() + .any(|f| f.msg_idx == file.msg_idx && f.cf_idx == cf_idx && f.is_compressed) + { + compressed_files.push(format!("{}", cf.file_name)); + } else { + remaining_files.push(cf.clone()); + } + } + + if !compressed_files.is_empty() { + let compressed_files_str = compressed_files.join(", "); + if remaining_files.is_empty() { + let summary = format!("💿 Duplicate files compressed: '{}' files were shown earlier in the conversation history. 
Do not ask for these files again.", compressed_files_str); + messages[file.msg_idx].content = ChatContent::SimpleText(summary); + messages[file.msg_idx].role = "cd_instruction".to_string(); + tracing::info!( + "Stage 0: Fully compressed ContextFile at index {}: all {} files removed", + file.msg_idx, + compressed_files.len() + ); + } else { + let new_content = serde_json::to_string(&remaining_files) + .expect("serialization of filtered ContextFiles failed"); + messages[file.msg_idx].content = ChatContent::SimpleText(new_content); + tracing::info!("Stage 0: Partially compressed ContextFile at index {}: {} files removed, {} files kept", + file.msg_idx, compressed_files.len(), remaining_files.len()); + } + + compressed_count += compressed_files.len(); + modified_messages.insert(file.msg_idx); + } + } + } + + Ok((compressed_count, preserve_messages)) +} + +fn replace_broken_tool_call_messages( + messages: &mut Vec, + sampling_parameters: &mut SamplingParameters, + new_max_new_tokens: usize, +) { + let high_budget_tools = vec!["create_textdoc"]; + let last_index_assistant = messages + .iter() + .rposition(|msg| msg.role == "assistant") + .unwrap_or(0); + for (i, message) in messages.iter_mut().enumerate() { + if let Some(tool_calls) = &mut message.tool_calls { + let incorrect_reasons = tool_calls + .iter() + .map(|tc| { + match serde_json::from_str::>(&tc.function.arguments) { + Ok(_) => None, + Err(err) => Some(format!( + "broken {}({}): {}", + tc.function.name, + first_n_chars(&tc.function.arguments, 100), + err + )), + } + }) + .filter_map(|x| x) + .collect::>(); + let has_high_budget_tools = tool_calls + .iter() + .any(|tc| high_budget_tools.contains(&tc.function.name.as_str())); + if !incorrect_reasons.is_empty() { + // Only increase max_new_tokens if this is the last message and it was truncated due to "length" + let extra_message = if i == last_index_assistant + && message.finish_reason == Some("length".to_string()) + { + tracing::warn!( + "increasing `max_new_tokens` from {} to {}", + sampling_parameters.max_new_tokens, + new_max_new_tokens + ); + let tokens_msg = if sampling_parameters.max_new_tokens < new_max_new_tokens { + sampling_parameters.max_new_tokens = new_max_new_tokens; + format!("The message was stripped (finish_reason=`length`), the tokens budget was too small for the tool calls. Increasing `max_new_tokens` to {new_max_new_tokens}.") + } else { + "The message was stripped (finish_reason=`length`), the tokens budget cannot fit those tool calls.".to_string() + }; + if has_high_budget_tools { + format!("{tokens_msg} Try to make changes one by one (ie using `update_textdoc()`).") + } else { + format!("{tokens_msg} Change your strategy.") + } + } else { + "".to_string() + }; + + let incorrect_reasons_concat = incorrect_reasons.join("\n"); + message.role = "cd_instruction".to_string(); + message.content = ChatContent::SimpleText(format!("💿 Previous tool calls are not valid: {incorrect_reasons_concat}.\n{extra_message}")); + message.tool_calls = None; + tracing::warn!( + "tool calls are broken, converting the tool call message to the `cd_instruction`:\n{:?}", + message.content.content_text_only() + ); + } + } + } +} + +fn validate_chat_history(messages: &Vec) -> Result, String> { + // 1. 
Check that there is at least one message (and that at least one is "system" or "user") + if messages.is_empty() { + return Err("Invalid chat history: no messages present".to_string()); + } + let has_system_or_user = messages + .iter() + .any(|msg| msg.role == "system" || msg.role == "user"); + if !has_system_or_user { + return Err( + "Invalid chat history: must have at least one message of role 'system' or 'user'" + .to_string(), + ); + } + + // 2. The first message must be system or user. + if messages[0].role != "system" && messages[0].role != "user" { + return Err(format!( + "Invalid chat history: first message must be 'system' or 'user', got '{}'", + messages[0].role + )); + } + + // 3. For every tool call in any message, verify its function arguments are parseable. + for (msg_idx, msg) in messages.iter().enumerate() { + if let Some(tool_calls) = &msg.tool_calls { + for tc in tool_calls { + if let Err(e) = tc.function.parse_args() { + return Err(format!( + "Message at index {} has an unparseable tool call arguments for tool '{}': {} (arguments: {})", + msg_idx, tc.function.name, e, tc.function.arguments)); + } + } + } + } + + // 4. For each assistant message with nonempty tool_calls, + // check that every tool call id mentioned is later (i.e. at a higher index) answered by a tool message. + for (idx, msg) in messages.iter().enumerate() { + if msg.role == "assistant" { + if let Some(tool_calls) = &msg.tool_calls { + if !tool_calls.is_empty() { + for tc in tool_calls { + // Look for a following "tool" message whose tool_call_id equals tc.id + let mut found = false; + for later_msg in messages.iter().skip(idx + 1) { + if later_msg.tool_call_id == tc.id { + found = true; + break; + } + } + if !found { + return Err(format!( + "Assistant message at index {} has a tool call id '{}' that is unresponded (no following tool message with that id)", + idx, tc.id + )); + } + } + } + } + } + } + Ok(messages.to_vec()) +} + +pub fn fix_and_limit_messages_history( + messages: &Vec, + sampling_parameters_to_patch: &mut SamplingParameters, +) -> Result, String> { + let mut mutable_messages = messages.clone(); + replace_broken_tool_call_messages(&mut mutable_messages, sampling_parameters_to_patch, 16000); + remove_invalid_tool_calls_and_tool_calls_results(&mut mutable_messages); + validate_chat_history(&mutable_messages) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::call_validation::{ChatToolCall, ChatToolFunction}; + + #[test] + fn test_is_content_duplicate_overlapping_ranges() { + let content1 = "line1\nline2\nline3"; + let content2 = "line2\nline3"; + assert!(is_content_duplicate(content1, 1, 3, content2, 2, 3)); + } + + #[test] + fn test_is_content_duplicate_non_overlapping_ranges() { + let content1 = "line1\nline2"; + let content2 = "line5\nline6"; + assert!(!is_content_duplicate(content1, 1, 2, content2, 5, 6)); + } + + #[test] + fn test_is_content_duplicate_empty_content() { + assert!(!is_content_duplicate("", 1, 10, "content", 1, 10)); + assert!(!is_content_duplicate("content", 1, 10, "", 1, 10)); + } + + #[test] + fn test_is_content_duplicate_substring_containment() { + let small = "line2\nline3"; + let large = "line1\nline2\nline3\nline4"; + assert!(is_content_duplicate(small, 2, 3, large, 1, 4)); + assert!(is_content_duplicate(large, 1, 4, small, 2, 3)); + } + + #[test] + fn test_is_content_duplicate_exact_match() { + let content = "line1\nline2"; + assert!(is_content_duplicate(content, 1, 2, content, 1, 2)); + } + + #[test] + fn test_is_content_duplicate_ignores_ellipsis_lines() 
{ + let content1 = "...\nreal_line\n..."; + let content2 = "real_line"; + assert!(is_content_duplicate(content1, 1, 3, content2, 1, 1)); + } + + #[test] + fn test_remove_invalid_tool_calls_removes_unanswered() { + let mut messages = vec![ChatMessage { + role: "assistant".to_string(), + tool_calls: Some(vec![ChatToolCall { + id: "call_1".to_string(), + index: Some(0), + function: ChatToolFunction { + name: "test".to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }]), + ..Default::default() + }]; + remove_invalid_tool_calls_and_tool_calls_results(&mut messages); + assert!(messages.is_empty()); + } + + #[test] + fn test_remove_invalid_tool_calls_keeps_answered() { + let mut messages = vec![ + ChatMessage { + role: "assistant".to_string(), + tool_calls: Some(vec![ChatToolCall { + id: "call_1".to_string(), + index: Some(0), + function: ChatToolFunction { + name: "test".to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }]), + ..Default::default() + }, + ChatMessage { + role: "tool".to_string(), + tool_call_id: "call_1".to_string(), + content: ChatContent::SimpleText("result".to_string()), + ..Default::default() + }, + ]; + remove_invalid_tool_calls_and_tool_calls_results(&mut messages); + assert_eq!(messages.len(), 2); + } + + #[test] + fn test_remove_invalid_tool_calls_removes_orphan_results() { + let mut messages = vec![ChatMessage { + role: "tool".to_string(), + tool_call_id: "nonexistent_call".to_string(), + content: ChatContent::SimpleText("orphan result".to_string()), + ..Default::default() + }]; + remove_invalid_tool_calls_and_tool_calls_results(&mut messages); + assert!(messages.is_empty()); + } + + #[test] + fn test_remove_invalid_tool_calls_keeps_last_duplicate() { + let mut messages = vec![ + ChatMessage { + role: "assistant".to_string(), + tool_calls: Some(vec![ChatToolCall { + id: "call_1".to_string(), + index: Some(0), + function: ChatToolFunction { + name: "test".to_string(), + arguments: "{}".to_string(), + }, + tool_type: "function".to_string(), + extra_content: None, + }]), + ..Default::default() + }, + ChatMessage { + role: "tool".to_string(), + tool_call_id: "call_1".to_string(), + content: ChatContent::SimpleText("first result".to_string()), + ..Default::default() + }, + ChatMessage { + role: "diff".to_string(), + tool_call_id: "call_1".to_string(), + content: ChatContent::SimpleText("second result (diff)".to_string()), + ..Default::default() + }, + ]; + remove_invalid_tool_calls_and_tool_calls_results(&mut messages); + assert_eq!(messages.len(), 2); + assert_eq!(messages[1].role, "diff"); + } +} diff --git a/refact-agent/engine/src/chat/linearize.rs b/refact-agent/engine/src/chat/linearize.rs new file mode 100644 index 000000000..44d47582d --- /dev/null +++ b/refact-agent/engine/src/chat/linearize.rs @@ -0,0 +1,1225 @@ +use crate::call_validation::{ChatContent, ChatMessage}; +use crate::scratchpads::multimodality::MultimodalElement; + +const TOOL_APPENDABLE_ROLES: &[&str] = &["context_file", "plain_text", "cd_instruction"]; +const TOOL_ROLES: &[&str] = &["tool", "diff"]; +const MERGE_SEPARATOR: &str = "\n\n"; + +fn is_tool_appendable(role: &str) -> bool { + TOOL_APPENDABLE_ROLES.contains(&role) +} + +fn is_tool_role(role: &str) -> bool { + TOOL_ROLES.contains(&role) +} + +fn content_to_elements(content: &ChatContent) -> Vec { + match content { + ChatContent::SimpleText(text) => { + if text.is_empty() { + vec![] + } else { + vec![MultimodalElement { + 
m_type: "text".to_string(),
+                    m_content: text.clone(),
+                }]
+            }
+        }
+        ChatContent::Multimodal(elements) => {
+            elements.iter().filter(|el| {
+                !(el.is_text() && el.m_content.is_empty())
+            }).cloned().collect()
+        }
+        ChatContent::ContextFiles(_) => {
+            let text = content.content_text_only();
+            if text.is_empty() {
+                vec![]
+            } else {
+                vec![MultimodalElement {
+                    m_type: "text".to_string(),
+                    m_content: text,
+                }]
+            }
+        }
+    }
+}
+
+fn elements_to_content(elements: Vec<MultimodalElement>) -> ChatContent {
+    if elements.is_empty() {
+        return ChatContent::SimpleText(String::new());
+    }
+
+    if elements.iter().any(|el| !el.is_text()) {
+        ChatContent::Multimodal(elements)
+    } else {
+        let text = elements
+            .iter()
+            .map(|el| el.m_content.as_str())
+            .collect::<Vec<_>>()
+            .join(MERGE_SEPARATOR);
+        ChatContent::SimpleText(text)
+    }
+}
+
+fn merge_user_like_group(group: Vec<ChatMessage>) -> ChatMessage {
+    debug_assert!(!group.is_empty());
+
+    if group.len() == 1 {
+        let mut msg = group.into_iter().next().unwrap();
+        if msg.role != "user" {
+            msg.role = "user".to_string();
+        }
+        return msg;
+    }
+
+    let mut all_elements: Vec<MultimodalElement> = Vec::new();
+
+    for msg in &group {
+        let elements = content_to_elements(&msg.content);
+        if elements.is_empty() {
+            continue;
+        }
+        if !all_elements.is_empty() {
+            let last_is_text = all_elements.last().map_or(false, |el| el.is_text());
+            let next_is_text = elements.first().map_or(false, |el| el.is_text());
+            if last_is_text && next_is_text {
+                if let Some(last) = all_elements.last_mut() {
+                    last.m_content.push_str(MERGE_SEPARATOR);
+                    last.m_content.push_str(&elements[0].m_content);
+                    all_elements.extend(elements.into_iter().skip(1));
+                    continue;
+                }
+            }
+        }
+        all_elements.extend(elements);
+    }
+
+    let mut merged = group[0].clone();
+    merged.role = "user".to_string();
+    merged.content = elements_to_content(all_elements);
+    merged.tool_calls = None;
+    merged.tool_call_id = String::new();
+    merged.thinking_blocks = None;
+    merged.reasoning_content = None;
+    merged
+}
+
+/// Appends content from a tool-appendable message (context_file, plain_text, cd_instruction)
+/// into an existing tool/diff message's text content.
+fn append_to_tool_message(tool_msg: &mut ChatMessage, appendable: &ChatMessage) {
+    let extra_text = match &appendable.content {
+        ChatContent::SimpleText(text) => text.clone(),
+        ChatContent::ContextFiles(_) => appendable.content.content_text_only(),
+        ChatContent::Multimodal(elements) => {
+            elements.iter()
+                .filter(|el| el.is_text())
+                .map(|el| el.m_content.as_str())
+                .collect::<Vec<_>>()
+                .join(MERGE_SEPARATOR)
+        }
+    };
+    if extra_text.is_empty() {
+        return;
+    }
+    match &mut tool_msg.content {
+        ChatContent::SimpleText(text) => {
+            if !text.is_empty() {
+                text.push_str(MERGE_SEPARATOR);
+            }
+            text.push_str(&extra_text);
+        }
+        _ => {
+            let existing = tool_msg.content.content_text_only();
+            let mut combined = existing;
+            if !combined.is_empty() {
+                combined.push_str(MERGE_SEPARATOR);
+            }
+            combined.push_str(&extra_text);
+            tool_msg.content = ChatContent::SimpleText(combined);
+        }
+    }
+}
+
+/// Merges consecutive user-like messages and folds tool-appendable messages
+/// (context_file, plain_text, cd_instruction) into preceding tool/diff messages
+/// for cache-friendly LLM requests. Idempotent and deterministic.
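+/// For example, `[system, context_file, user]` collapses to `[system, user]` with the file
+/// text folded into the merged user message (see `test_merge_context_file_with_user` below).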
+/// +/// Rules: +/// - context_file/plain_text/cd_instruction after tool/diff → appended to last tool msg +/// - consecutive user-like messages → merged into single "user" message +/// - real "user" message after tool → starts a new user group (not folded into tool) +pub fn linearize_thread_for_llm(messages: &[ChatMessage]) -> Vec { + let mut result: Vec = Vec::new(); + let mut user_group: Vec = Vec::new(); + + for msg in messages { + if is_tool_appendable(&msg.role) { + if !user_group.is_empty() { + // Already accumulating user-like messages, keep accumulating + user_group.push(msg.clone()); + } else if let Some(last) = result.last_mut() { + if is_tool_role(&last.role) { + // Fold into the preceding tool/diff message + append_to_tool_message(last, msg); + } else { + // After system/assistant/etc — start a user group + user_group.push(msg.clone()); + } + } else { + // First message in the thread + user_group.push(msg.clone()); + } + } else if msg.role == "user" { + // Real user message always goes into user group + user_group.push(msg.clone()); + } else { + // Non-user-like role (system, assistant, tool, diff) + if !user_group.is_empty() { + result.push(merge_user_like_group(std::mem::take(&mut user_group))); + } + result.push(msg.clone()); + } + } + + if !user_group.is_empty() { + result.push(merge_user_like_group(user_group)); + } + + result +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::call_validation::{ChatMessage, ChatContent, ContextFile}; + use crate::scratchpads::multimodality::MultimodalElement; + + fn text_msg(role: &str, text: &str) -> ChatMessage { + ChatMessage { + role: role.to_string(), + content: ChatContent::SimpleText(text.to_string()), + ..Default::default() + } + } + + fn text_msg_with_id(role: &str, text: &str, id: &str) -> ChatMessage { + ChatMessage { + message_id: id.to_string(), + role: role.to_string(), + content: ChatContent::SimpleText(text.to_string()), + ..Default::default() + } + } + + fn context_file_msg(files: Vec<(&str, &str, usize, usize)>) -> ChatMessage { + ChatMessage { + role: "context_file".to_string(), + content: ChatContent::ContextFiles( + files + .into_iter() + .map(|(name, content, l1, l2)| ContextFile { + file_name: name.to_string(), + file_content: content.to_string(), + line1: l1, + line2: l2, + ..Default::default() + }) + .collect(), + ), + ..Default::default() + } + } + + fn multimodal_msg(role: &str, elements: Vec<(&str, &str)>) -> ChatMessage { + ChatMessage { + role: role.to_string(), + content: ChatContent::Multimodal( + elements + .into_iter() + .map(|(t, c)| MultimodalElement { + m_type: t.to_string(), + m_content: c.to_string(), + }) + .collect(), + ), + ..Default::default() + } + } + + fn assistant_msg(text: &str) -> ChatMessage { + text_msg("assistant", text) + } + + fn tool_msg(text: &str, tool_call_id: &str) -> ChatMessage { + ChatMessage { + role: "tool".to_string(), + content: ChatContent::SimpleText(text.to_string()), + tool_call_id: tool_call_id.to_string(), + ..Default::default() + } + } + + #[test] + fn test_no_merge_needed_simple_alternation() { + let msgs = vec![ + text_msg("system", "You are helpful"), + text_msg("user", "Hello"), + assistant_msg("Hi there"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 3); + assert_eq!(result[0].role, "system"); + assert_eq!(result[1].role, "user"); + assert_eq!(result[2].role, "assistant"); + } + + #[test] + fn test_merge_consecutive_user_messages() { + let msgs = vec![ + text_msg("system", "You are helpful"), + text_msg("user", "First 
part"), + text_msg("user", "Second part"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[0].role, "system"); + assert_eq!(result[1].role, "user"); + assert_eq!( + result[1].content.content_text_only(), + "First part\n\nSecond part" + ); + } + + #[test] + fn test_merge_context_file_with_user() { + let msgs = vec![ + text_msg("system", "You are helpful"), + context_file_msg(vec![("src/main.rs", "fn main() {}", 1, 1)]), + text_msg("user", "Fix the bug"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[0].role, "system"); + assert_eq!(result[1].role, "user"); + let text = result[1].content.content_text_only(); + assert!(text.contains("src/main.rs:1-1")); + assert!(text.contains("fn main() {}")); + assert!(text.contains("Fix the bug")); + } + + #[test] + fn test_merge_multiple_context_files_and_user() { + let msgs = vec![ + text_msg("system", "System prompt"), + context_file_msg(vec![("a.rs", "aaa", 1, 3)]), + context_file_msg(vec![("b.rs", "bbb", 1, 5)]), + text_msg("plain_text", "Some plain text"), + text_msg("user", "Do something"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[1].role, "user"); + let text = result[1].content.content_text_only(); + assert!(text.contains("a.rs:1-3")); + assert!(text.contains("b.rs:1-5")); + assert!(text.contains("Some plain text")); + assert!(text.contains("Do something")); + } + + #[test] + fn test_merge_cd_instruction_with_user() { + let msgs = vec![ + text_msg("system", "System"), + text_msg("cd_instruction", "cd /project"), + text_msg("user", "List files"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[1].role, "user"); + assert_eq!( + result[1].content.content_text_only(), + "cd /project\n\nList files" + ); + } + + + + #[test] + fn test_no_merge_across_assistant_boundary() { + let msgs = vec![ + text_msg("user", "First question"), + assistant_msg("First answer"), + context_file_msg(vec![("c.rs", "code", 1, 10)]), + text_msg("user", "Second question"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 3); // user, assistant, user(merged) + assert_eq!(result[0].role, "user"); + assert_eq!(result[1].role, "assistant"); + assert_eq!(result[2].role, "user"); + let merged_text = result[2].content.content_text_only(); + assert!(merged_text.contains("c.rs:1-10")); + assert!(merged_text.contains("Second question")); + } + + + + #[test] + fn test_tool_messages_not_merged() { + let msgs = vec![ + text_msg("user", "Do something"), + assistant_msg("Calling tool"), + tool_msg("Tool result 1", "call_1"), + tool_msg("Tool result 2", "call_2"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 4); + assert_eq!(result[2].role, "tool"); + assert_eq!(result[2].tool_call_id, "call_1"); + assert_eq!(result[3].role, "tool"); + assert_eq!(result[3].tool_call_id, "call_2"); + } + + #[test] + fn test_tool_loop_pattern_preserved() { + let msgs = vec![ + text_msg("system", "System"), + text_msg("user", "Fix bug"), + assistant_msg("Let me check"), + tool_msg("file contents", "call_1"), + assistant_msg("Now I'll patch"), + tool_msg("patch applied", "call_2"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 6); + // Exact same structure — nothing to merge + for (i, (orig, lin)) in msgs.iter().zip(result.iter()).enumerate() { + assert_eq!(orig.role, lin.role, "Role 
mismatch at index {}", i); + } + } + + + + #[test] + fn test_idempotency_simple() { + let msgs = vec![ + text_msg("system", "System"), + context_file_msg(vec![("a.rs", "aaa", 1, 3)]), + text_msg("user", "Hello"), + assistant_msg("Hi"), + ]; + let first = linearize_thread_for_llm(&msgs); + let second = linearize_thread_for_llm(&first); + assert_eq!(first.len(), second.len()); + for (a, b) in first.iter().zip(second.iter()) { + assert_eq!(a.role, b.role); + assert_eq!(a.content.content_text_only(), b.content.content_text_only()); + } + } + + #[test] + fn test_idempotency_complex() { + let msgs = vec![ + text_msg("system", "System"), + context_file_msg(vec![("a.rs", "aaa", 1, 3)]), + context_file_msg(vec![("b.rs", "bbb", 4, 6)]), + text_msg("cd_instruction", "cd /tmp"), + text_msg("user", "Do it"), + assistant_msg("OK"), + text_msg("plain_text", "Extra info"), + text_msg("user", "More"), + ]; + let first = linearize_thread_for_llm(&msgs); + let second = linearize_thread_for_llm(&first); + assert_eq!(first.len(), second.len()); + for (a, b) in first.iter().zip(second.iter()) { + assert_eq!(a.role, b.role); + assert_eq!(a.content.content_text_only(), b.content.content_text_only()); + } + } + + + + #[test] + fn test_multimodal_image_preserved() { + let msgs = vec![ + text_msg("system", "System"), + multimodal_msg("user", vec![("text", "Look at this"), ("image/png", "base64data")]), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + match &result[1].content { + ChatContent::Multimodal(elements) => { + assert_eq!(elements.len(), 2); + assert!(elements[0].is_text()); + assert!(elements[1].is_image()); + } + _ => panic!("Expected Multimodal content"), + } + } + + #[test] + fn test_merge_text_with_multimodal() { + let msgs = vec![ + text_msg("system", "System"), + text_msg("user", "Context info"), + multimodal_msg("user", vec![("text", "Look at this"), ("image/png", "imgdata")]), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + match &result[1].content { + ChatContent::Multimodal(elements) => { + // "Context info" + separator + "Look at this" merged into one text, then image + assert_eq!(elements.len(), 2); + assert!(elements[0].is_text()); + assert!(elements[0].m_content.contains("Context info")); + assert!(elements[0].m_content.contains("Look at this")); + assert!(elements[1].is_image()); + assert_eq!(elements[1].m_content, "imgdata"); + } + _ => panic!("Expected Multimodal content"), + } + } + + #[test] + fn test_merge_context_file_with_multimodal() { + let msgs = vec![ + context_file_msg(vec![("x.rs", "code", 1, 5)]), + multimodal_msg("user", vec![("text", "Describe"), ("image/png", "img")]), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 1); + match &result[0].content { + ChatContent::Multimodal(elements) => { + assert!(elements[0].is_text()); + assert!(elements[0].m_content.contains("x.rs:1-5")); + assert!(elements[0].m_content.contains("Describe")); + assert!(elements[1].is_image()); + } + _ => panic!("Expected Multimodal content"), + } + } + + + + #[test] + fn test_empty_user_message_skipped_in_merge() { + let msgs = vec![ + text_msg("system", "System"), + text_msg("user", ""), + text_msg("user", "Real content"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[1].content.content_text_only(), "Real content"); + } + + #[test] + fn test_all_empty_user_messages() { + let msgs = vec![ + text_msg("system", "System"), + text_msg("user", ""), 
+ text_msg("context_file", ""), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[1].role, "user"); + } + + + + #[test] + fn test_empty_input() { + let result = linearize_thread_for_llm(&[]); + assert!(result.is_empty()); + } + + #[test] + fn test_single_user_message() { + let msgs = vec![text_msg("user", "Hello")]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 1); + assert_eq!(result[0].role, "user"); + assert_eq!(result[0].content.content_text_only(), "Hello"); + } + + #[test] + fn test_single_context_file_becomes_user() { + let msgs = vec![context_file_msg(vec![("f.rs", "code", 1, 1)])]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 1); + assert_eq!(result[0].role, "user"); + } + + #[test] + fn test_system_not_merged_with_user() { + let msgs = vec![ + text_msg("system", "System prompt"), + text_msg("user", "User message"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[0].role, "system"); + assert_eq!(result[1].role, "user"); + } + + #[test] + fn test_diff_messages_not_merged() { + let msgs = vec![ + assistant_msg("Patching"), + ChatMessage { + role: "diff".to_string(), + content: ChatContent::SimpleText("diff content".to_string()), + tool_call_id: "call_1".to_string(), + ..Default::default() + }, + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[1].role, "diff"); + } + + #[test] + fn test_message_id_preserved_from_first() { + let msgs = vec![ + text_msg_with_id("user", "First", "msg-001"), + text_msg_with_id("user", "Second", "msg-002"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 1); + assert_eq!(result[0].message_id, "msg-001"); + } + + + + #[test] + fn test_deterministic_output() { + let msgs = vec![ + text_msg("system", "System"), + context_file_msg(vec![ + ("a.rs", "fn a() {}", 1, 1), + ("b.rs", "fn b() {}", 1, 1), + ]), + text_msg("cd_instruction", "cd /project"), + text_msg("user", "Fix everything"), + ]; + + // Run multiple times, output must be identical + let result1 = linearize_thread_for_llm(&msgs); + let result2 = linearize_thread_for_llm(&msgs); + let result3 = linearize_thread_for_llm(&msgs); + + for i in 0..result1.len() { + assert_eq!(result1[i].content.content_text_only(), result2[i].content.content_text_only()); + assert_eq!(result2[i].content.content_text_only(), result3[i].content.content_text_only()); + } + } + + + + #[test] + fn test_realistic_agentic_flow() { + // Simulates: system + project context + knowledge + user question + // then tool loop with strict alternation + let msgs = vec![ + text_msg("system", "You are a coding assistant"), + context_file_msg(vec![("project/README.md", "# Project", 1, 1)]), + context_file_msg(vec![("src/lib.rs", "pub mod auth;", 1, 1)]), + text_msg("user", "Fix the auth bug"), + assistant_msg("Let me look at the auth module"), + tool_msg("pub fn login() { ... 
}", "call_1"), + assistant_msg("I see the issue, let me patch it"), + tool_msg("Patch applied successfully", "call_2"), + ]; + let result = linearize_thread_for_llm(&msgs); + + // Expected: system, user(merged 3), assistant, tool, assistant, tool + assert_eq!(result.len(), 6); + assert_eq!(result[0].role, "system"); + assert_eq!(result[1].role, "user"); + assert_eq!(result[2].role, "assistant"); + assert_eq!(result[3].role, "tool"); + assert_eq!(result[4].role, "assistant"); + assert_eq!(result[5].role, "tool"); + + // The merged user message should contain all context + question + let user_text = result[1].content.content_text_only(); + assert!(user_text.contains("project/README.md")); + assert!(user_text.contains("src/lib.rs")); + assert!(user_text.contains("Fix the auth bug")); + } + + + /// 197x: user→user (conversation continuation / handoff messages) + #[test] + fn test_real_user_user_handoff() { + let msgs = vec![ + text_msg("system", "You are Refact Agent"), + context_file_msg(vec![("knowledge.md", "prior context", 1, 4)]), + text_msg("user", "## Previous conversation summary\n\nUser requested auth fix"), + text_msg("user", "The previous trajectory abc-123. Continue from where you stopped."), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); // system + merged user + assert_eq!(result[1].role, "user"); + let text = result[1].content.content_text_only(); + assert!(text.contains("knowledge.md")); + assert!(text.contains("Previous conversation summary")); + assert!(text.contains("Continue from where you stopped")); + } + + /// 55x: cf, cf, user, user (context files + multi-part user input) + #[test] + fn test_real_cf_cf_user_user() { + let msgs = vec![ + text_msg("system", "system prompt"), + context_file_msg(vec![("AGENTS.md", "agent config", 1, 10)]), + context_file_msg(vec![("knowledge.md", "cached knowledge", 1, 4)]), + text_msg("user", "## Previous conversation summary\n\nUser worked on providers"), + text_msg("user", "The previous trajectory xyz-789. 
Continue."), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + let text = result[1].content.content_text_only(); + assert!(text.contains("AGENTS.md")); + assert!(text.contains("knowledge.md")); + assert!(text.contains("Previous conversation")); + assert!(text.contains("Continue")); + } + + /// 36x: cf, cf only (context-only without final user message) + #[test] + fn test_real_cf_cf_no_user() { + let msgs = vec![ + text_msg("system", "system prompt"), + context_file_msg(vec![("file1.rs", "code1", 1, 10)]), + context_file_msg(vec![("file2.rs", "code2", 1, 5)]), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); // system + merged user + assert_eq!(result[1].role, "user"); + let text = result[1].content.content_text_only(); + assert!(text.contains("file1.rs")); + assert!(text.contains("file2.rs")); + } + + /// 7x: cf, cf, user, user, cd_instruction (mode transition instruction) + #[test] + fn test_real_cf_cf_user_user_cd_instruction() { + let msgs = vec![ + text_msg("system", "system prompt"), + context_file_msg(vec![("knowledge.md", "cached", 1, 4)]), + context_file_msg(vec![("prepare.rs", "code", 1, 100)]), + text_msg("user", "## Summary\n\nUser wants providers page"), + text_msg("user", "Continue from trajectory abc."), + text_msg("cd_instruction", "💿 Now confirm the plan with the user"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[1].role, "user"); + let text = result[1].content.content_text_only(); + assert!(text.contains("knowledge.md")); + assert!(text.contains("prepare.rs")); + assert!(text.contains("Summary")); + assert!(text.contains("Continue from trajectory")); + assert!(text.contains("💿 Now confirm the plan")); + } + + /// 3x: user, cf, user (interleaved — user asks, context injected, user continues) + #[test] + fn test_real_interleaved_user_cf_user() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("user", "avoid comments though"), + context_file_msg(vec![("trajectory.json", "{}", 1, 100)]), + text_msg("user", "also add tests to backend"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + let text = result[1].content.content_text_only(); + assert!(text.contains("avoid comments")); + assert!(text.contains("trajectory.json")); + assert!(text.contains("also add tests")); + } + + /// 2x: context_file with string content (not ContextFiles variant!) 
+ #[test] + fn test_real_context_file_as_string() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("context_file", "some pre-formatted file content here"), + text_msg("user", "explain this"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + let text = result[1].content.content_text_only(); + assert!(text.contains("some pre-formatted file content")); + assert!(text.contains("explain this")); + } + + /// 4x: 9+ context_files in a row (heavy context injection) + #[test] + fn test_real_many_context_files() { + let mut msgs = vec![text_msg("system", "system prompt")]; + for i in 0..9 { + msgs.push(context_file_msg(vec![ + (&format!("file{i}.rs"), &format!("content {i}"), 1, 10), + ])); + } + msgs.push(text_msg("user", "Fix everything")); + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); // system + one merged user + let text = result[1].content.content_text_only(); + for i in 0..9 { + assert!(text.contains(&format!("file{i}.rs")), "Missing file{i}.rs"); + } + assert!(text.contains("Fix everything")); + } + + /// 235x: tool → context_file, user → assistant (mid-conversation context injection) + #[test] + fn test_real_tool_then_cf_user() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("user", "Find the bug"), + assistant_msg("Let me search"), + tool_msg("found: auth.rs has issue", "call_1"), + context_file_msg(vec![("auth.rs", "fn login() {}", 1, 5)]), + text_msg("user", "Fix that function"), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, user, assistant, tool(+cf), user + assert_eq!(result.len(), 5); + assert_eq!(result[0].role, "system"); + assert_eq!(result[1].role, "user"); + assert_eq!(result[2].role, "assistant"); + assert_eq!(result[3].role, "tool"); + let tool_text = result[3].content.content_text_only(); + assert!(tool_text.contains("found: auth.rs has issue")); + assert!(tool_text.contains("auth.rs")); + assert_eq!(result[4].role, "user"); + assert_eq!(result[4].content.content_text_only(), "Fix that function"); + } + + /// tool → cf, cd, cf, user: cf+cd+cf fold into tool, user stays separate + #[test] + fn test_real_tool_then_cf_cd_cf_user() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("user", "start"), + assistant_msg("calling tool"), + tool_msg("tool output here", "call_1"), + context_file_msg(vec![("file1.rs", "code1", 1, 10)]), + text_msg("cd_instruction", "💿 Review complete"), + context_file_msg(vec![("file2.rs", "code2", 1, 5)]), + text_msg("user", "now fix it"), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, user, assistant, tool(+cf+cd+cf), user + assert_eq!(result.len(), 5); + assert_eq!(result[3].role, "tool"); + let tool_text = result[3].content.content_text_only(); + assert!(tool_text.contains("tool output here")); + assert!(tool_text.contains("file1.rs")); + assert!(tool_text.contains("💿 Review complete")); + assert!(tool_text.contains("file2.rs")); + assert_eq!(result[4].role, "user"); + assert_eq!(result[4].content.content_text_only(), "now fix it"); + } + + /// 4x: plain_text role with directory tree content + #[test] + fn test_real_plain_text_directory_tree() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("user", "show me the project"), + assistant_msg("Here's the tree"), + text_msg("plain_text", "/\n home/\n svakhreev/\n projects/\n refact/"), + text_msg("user", "now explain the structure"), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, user, assistant, 
merged(plain_text+user) + assert_eq!(result.len(), 4); + assert_eq!(result[3].role, "user"); + let text = result[3].content.content_text_only(); + assert!(text.contains("home/")); + assert!(text.contains("now explain")); + } + + /// 16x: trailing cf, cf, user with no assistant after (END of thread) + #[test] + fn test_real_trailing_sequence_no_assistant() { + let msgs = vec![ + text_msg("system", "system prompt"), + context_file_msg(vec![("k1.md", "knowledge 1", 1, 4)]), + context_file_msg(vec![("k2.md", "knowledge 2", 1, 4)]), + text_msg("user", "Start working on the task"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); + assert_eq!(result[1].role, "user"); + let text = result[1].content.content_text_only(); + assert!(text.contains("k1.md")); + assert!(text.contains("k2.md")); + assert!(text.contains("Start working")); + } + + /// 280x: system → cf, cf, cf, user → assistant (most common initial pattern) + #[test] + fn test_real_system_3cf_user_assistant() { + let msgs = vec![ + text_msg("system", "You are Refact Agent"), + context_file_msg(vec![("AGENTS.md", "agent guidelines", 1, 50)]), + context_file_msg(vec![("knowledge1.md", "prior work", 1, 4)]), + context_file_msg(vec![("knowledge2.md", "more context", 1, 4)]), + text_msg("user", "implement the feature"), + assistant_msg("I'll start by analyzing the codebase"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 3); // system, merged user, assistant + assert_eq!(result[0].role, "system"); + assert_eq!(result[1].role, "user"); + assert_eq!(result[2].role, "assistant"); + let text = result[1].content.content_text_only(); + assert!(text.contains("AGENTS.md")); + assert!(text.contains("knowledge1.md")); + assert!(text.contains("knowledge2.md")); + assert!(text.contains("implement the feature")); + } + + /// 87x: tool → user, user → assistant (multi-user after tool) + #[test] + fn test_real_tool_then_user_user() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("user", "start"), + assistant_msg("checking"), + tool_msg("result data", "call_1"), + text_msg("user", "## Previous conversation summary\n\nWorked on auth"), + text_msg("user", "Continue from trajectory abc."), + assistant_msg("continuing"), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, user, assistant, tool, merged(user+user), assistant + assert_eq!(result.len(), 6); + assert_eq!(result[4].role, "user"); + let text = result[4].content.content_text_only(); + assert!(text.contains("Previous conversation")); + assert!(text.contains("Continue from trajectory")); + } + + /// Complex real-world: mixed cf, user, cf, user, cd_instruction sequence + #[test] + fn test_real_complex_mixed_sequence() { + let msgs = vec![ + text_msg("system", "system prompt"), + context_file_msg(vec![("AGENTS.md", "config", 1, 10)]), + context_file_msg(vec![("knowledge.md", "cached", 1, 4)]), + text_msg("user", "## Previous conversation\n\nWorked on UI"), + text_msg("user", "Continue from trajectory xyz."), + context_file_msg(vec![("new_knowledge.md", "fresh context", 1, 4)]), + text_msg("user", "one more crazy example"), + text_msg("user", "just make sure that this madness is really fixed"), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); // system + one big merged user + let text = result[1].content.content_text_only(); + assert!(text.contains("AGENTS.md")); + assert!(text.contains("knowledge.md")); + assert!(text.contains("Previous conversation")); + 
assert!(text.contains("Continue from trajectory")); + assert!(text.contains("new_knowledge.md")); + assert!(text.contains("one more crazy example")); + assert!(text.contains("madness is really fixed")); + } + + /// 1x: diff → context_file, user → assistant (7x in real data) + #[test] + fn test_real_diff_then_cf_user() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("user", "start"), + assistant_msg("making changes"), + ChatMessage { + role: "diff".to_string(), + content: ChatContent::SimpleText("applied patch".to_string()), + tool_call_id: "call_1".to_string(), + ..Default::default() + }, + context_file_msg(vec![("updated.rs", "new code", 1, 10)]), + text_msg("user", "looks good, continue"), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, user, assistant, diff(+cf), user + assert_eq!(result.len(), 5); + assert_eq!(result[3].role, "diff"); + let diff_text = result[3].content.content_text_only(); + assert!(diff_text.contains("applied patch")); + assert!(diff_text.contains("updated.rs")); + assert_eq!(result[4].role, "user"); + assert_eq!(result[4].content.content_text_only(), "looks good, continue"); + } + + /// cd_instruction alone (4x: cd_instruction, user) + #[test] + fn test_real_cd_instruction_then_user() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("user", "start"), + assistant_msg("done"), + tool_msg("result", "call_1"), + text_msg("cd_instruction", "💿 Review complete. Present findings to the user."), + text_msg("user", "what did you find?"), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, user, assistant, tool(+cd), user + assert_eq!(result.len(), 5); + let tool_text = result[3].content.content_text_only(); + assert!(tool_text.contains("result")); + assert!(tool_text.contains("💿 Review complete")); + assert_eq!(result[4].role, "user"); + assert_eq!(result[4].content.content_text_only(), "what did you find?"); + } + + /// Multimodal user messages (406x: user with list content in real data) + #[test] + fn test_real_multimodal_user_with_context_file() { + let msgs = vec![ + text_msg("system", "system prompt"), + context_file_msg(vec![("screenshot.md", "description of UI", 1, 5)]), + multimodal_msg("user", vec![ + ("text", "Here's a screenshot of the bug"), + ("image/png", "base64encodeddata"), + ]), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 2); // system + merged user + // Should be Multimodal since it contains an image + match &result[1].content { + ChatContent::Multimodal(elements) => { + // Should have: text from cf, text from user, image from user + let texts: Vec<_> = elements.iter().filter(|e| e.is_text()).collect(); + let images: Vec<_> = elements.iter().filter(|e| e.is_image()).collect(); + assert!(!texts.is_empty()); + assert_eq!(images.len(), 1); + let all_text: String = texts.iter().map(|e| e.m_content.as_str()).collect::>().join(" "); + assert!(all_text.contains("screenshot.md")); + assert!(all_text.contains("screenshot of the bug")); + } + _ => panic!("Expected Multimodal content when merging text + image"), + } + } + + /// Core trajectory pattern: tool → context_file folds into tool + #[test] + fn test_tool_cf_folds_into_tool() { + let msgs = vec![ + text_msg("system", "system prompt"), + text_msg("user", "do something"), + assistant_msg("calling tool"), + tool_msg("tool result", "call_1"), + context_file_msg(vec![("file.rs", "fn main() {}", 1, 5)]), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, user, assistant, tool(+cf) + 
assert_eq!(result.len(), 4); + assert_eq!(result[3].role, "tool"); + let text = result[3].content.content_text_only(); + assert!(text.contains("tool result")); + assert!(text.contains("file.rs")); + } + + /// Multiple context_files after tool all fold in + #[test] + fn test_tool_multiple_cf_fold() { + let msgs = vec![ + text_msg("user", "go"), + assistant_msg("ok"), + tool_msg("found files", "call_1"), + context_file_msg(vec![("a.rs", "aaa", 1, 3)]), + context_file_msg(vec![("b.rs", "bbb", 1, 3)]), + context_file_msg(vec![("c.rs", "ccc", 1, 3)]), + ]; + let result = linearize_thread_for_llm(&msgs); + // user, assistant, tool(+cf+cf+cf) + assert_eq!(result.len(), 3); + assert_eq!(result[2].role, "tool"); + let text = result[2].content.content_text_only(); + assert!(text.contains("found files")); + assert!(text.contains("a.rs")); + assert!(text.contains("b.rs")); + assert!(text.contains("c.rs")); + } + + /// Repeating tool loop: tool→cf→assistant→tool→cf→assistant + #[test] + fn test_repeating_tool_cf_loop() { + let msgs = vec![ + text_msg("system", "sys"), + text_msg("user", "start"), + assistant_msg("step 1"), + tool_msg("result 1", "call_1"), + context_file_msg(vec![("f1.rs", "code1", 1, 5)]), + assistant_msg("step 2"), + tool_msg("result 2", "call_2"), + context_file_msg(vec![("f2.rs", "code2", 1, 5)]), + assistant_msg("done"), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, user, assistant, tool(+cf), assistant, tool(+cf), assistant + assert_eq!(result.len(), 7); + assert_eq!(result[3].role, "tool"); + assert!(result[3].content.content_text_only().contains("f1.rs")); + assert_eq!(result[4].role, "assistant"); + assert_eq!(result[5].role, "tool"); + assert!(result[5].content.content_text_only().contains("f2.rs")); + assert_eq!(result[6].role, "assistant"); + } + + /// Real trajectory pattern: 12x tool→cf→assistant repeating + #[test] + fn test_long_tool_cf_loop() { + let mut msgs = vec![ + text_msg("system", "sys"), + context_file_msg(vec![("init.rs", "init", 1, 1)]), + text_msg("user", "fix bugs"), + ]; + for i in 0..12 { + msgs.push(assistant_msg(&format!("step {i}"))); + msgs.push(tool_msg(&format!("result {i}"), &format!("call_{i}"))); + msgs.push(context_file_msg(vec![(&format!("f{i}.rs"), &format!("code{i}"), 1, 5)])); + } + let result = linearize_thread_for_llm(&msgs); + // system, user(init.rs+user), then 12x (assistant, tool(+cf)) = 2 + 24 = 26 + assert_eq!(result.len(), 26); + for i in 0..12 { + let tool_idx = 3 + i * 2; + assert_eq!(result[tool_idx].role, "tool", "idx {tool_idx}"); + let text = result[tool_idx].content.content_text_only(); + assert!(text.contains(&format!("result {i}"))); + assert!(text.contains(&format!("f{i}.rs"))); + } + } + + /// tool → user does NOT fold (user stays separate) + #[test] + fn test_tool_then_real_user_no_fold() { + let msgs = vec![ + text_msg("user", "go"), + assistant_msg("ok"), + tool_msg("result", "call_1"), + text_msg("user", "thanks, now do more"), + ]; + let result = linearize_thread_for_llm(&msgs); + // user, assistant, tool, user + assert_eq!(result.len(), 4); + assert_eq!(result[2].role, "tool"); + assert_eq!(result[2].content.content_text_only(), "result"); + assert_eq!(result[3].role, "user"); + assert_eq!(result[3].content.content_text_only(), "thanks, now do more"); + } + + /// tool → cf → user: cf folds into tool, user stays separate + #[test] + fn test_tool_cf_then_user_separate() { + let msgs = vec![ + text_msg("user", "go"), + assistant_msg("ok"), + tool_msg("result", "call_1"), + 
context_file_msg(vec![("x.rs", "code", 1, 5)]), + text_msg("user", "now fix it"), + ]; + let result = linearize_thread_for_llm(&msgs); + // user, assistant, tool(+cf), user + assert_eq!(result.len(), 4); + assert_eq!(result[2].role, "tool"); + assert!(result[2].content.content_text_only().contains("x.rs")); + assert_eq!(result[3].role, "user"); + assert_eq!(result[3].content.content_text_only(), "now fix it"); + } + + /// tool_call_id preserved when folding into tool + #[test] + fn test_tool_cf_preserves_tool_call_id() { + let msgs = vec![ + text_msg("user", "go"), + assistant_msg("ok"), + tool_msg("result", "call_abc123"), + context_file_msg(vec![("x.rs", "code", 1, 5)]), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result[2].role, "tool"); + assert_eq!(result[2].tool_call_id, "call_abc123"); + } + + /// context_file after assistant (not tool) → user group as before + #[test] + fn test_cf_after_assistant_becomes_user() { + let msgs = vec![ + text_msg("system", "sys"), + assistant_msg("hello"), + context_file_msg(vec![("x.rs", "code", 1, 5)]), + text_msg("user", "continue"), + ]; + let result = linearize_thread_for_llm(&msgs); + // system, assistant, user(cf+user) + assert_eq!(result.len(), 3); + assert_eq!(result[2].role, "user"); + let text = result[2].content.content_text_only(); + assert!(text.contains("x.rs")); + assert!(text.contains("continue")); + } + + /// Empty context_file after tool doesn't corrupt tool content + #[test] + fn test_tool_empty_cf_no_corruption() { + let msgs = vec![ + text_msg("user", "go"), + assistant_msg("ok"), + tool_msg("result", "call_1"), + text_msg("context_file", ""), + ]; + let result = linearize_thread_for_llm(&msgs); + assert_eq!(result.len(), 3); + assert_eq!(result[2].role, "tool"); + assert_eq!(result[2].content.content_text_only(), "result"); + } + + /// Idempotency with tool→cf folding + #[test] + fn test_idempotency_tool_cf() { + let msgs = vec![ + text_msg("system", "sys"), + text_msg("user", "go"), + assistant_msg("ok"), + tool_msg("result", "call_1"), + context_file_msg(vec![("x.rs", "code", 1, 5)]), + text_msg("user", "more"), + ]; + let first = linearize_thread_for_llm(&msgs); + let second = linearize_thread_for_llm(&first); + assert_eq!(first.len(), second.len()); + for (a, b) in first.iter().zip(second.iter()) { + assert_eq!(a.role, b.role); + assert_eq!(a.content.content_text_only(), b.content.content_text_only()); + } + } + + /// Multiple tools then cf folds into LAST tool + #[test] + fn test_multiple_tools_cf_folds_into_last() { + let msgs = vec![ + text_msg("user", "go"), + assistant_msg("ok"), + tool_msg("result A", "call_1"), + tool_msg("result B", "call_2"), + context_file_msg(vec![("x.rs", "code", 1, 5)]), + ]; + let result = linearize_thread_for_llm(&msgs); + // user, assistant, tool(A), tool(B+cf) + assert_eq!(result.len(), 4); + assert_eq!(result[2].content.content_text_only(), "result A"); + assert!(result[3].content.content_text_only().contains("result B")); + assert!(result[3].content.content_text_only().contains("x.rs")); + } +} diff --git a/refact-agent/engine/src/chat/mod.rs b/refact-agent/engine/src/chat/mod.rs new file mode 100644 index 000000000..4d6ad88eb --- /dev/null +++ b/refact-agent/engine/src/chat/mod.rs @@ -0,0 +1,37 @@ +pub mod config; +pub mod cache_guard; +mod content; +mod generation; +mod handlers; +pub mod history_limit; +pub mod linearize; +mod openai_merge; +pub mod prepare; +mod tool_call_recovery; +pub mod prompt_snippets; +pub mod prompts; +mod queue; +mod session; +pub mod 
stream_core;
+pub mod system_context;
+pub mod task_agent_monitor;
+#[cfg(test)]
+mod tests;
+pub mod tools;
+pub mod trajectories;
+pub mod trajectory_ops;
+pub mod types;
+
+pub use session::{
+    SessionsMap, create_sessions_map, start_session_cleanup_task,
+    get_or_create_session_with_trajectory,
+};
+pub use queue::process_command_queue;
+pub use trajectories::{
+    start_trajectory_watcher, TrajectoryEvent, TrajectoryMeta, handle_v1_trajectories_list,
+    handle_v1_trajectories_all, handle_v1_trajectories_get, handle_v1_trajectories_save,
+    handle_v1_trajectories_delete, handle_v1_trajectories_subscribe, maybe_save_trajectory,
+    find_trajectory_path, list_all_trajectories_meta,
+};
+pub use handlers::{handle_v1_chat_subscribe, handle_v1_chat_command, handle_v1_chat_cancel_queued};
+pub use task_agent_monitor::start_agent_monitor;
diff --git a/refact-agent/engine/src/chat/openai_merge.rs b/refact-agent/engine/src/chat/openai_merge.rs
new file mode 100644
index 000000000..b91ee4ab9
--- /dev/null
+++ b/refact-agent/engine/src/chat/openai_merge.rs
@@ -0,0 +1,386 @@
+use serde_json::json;
+
+/// Maximum number of parallel tool calls to prevent memory DoS
+const MAX_TOOL_CALLS: usize = 128;
+
+/// Accumulator for streaming tool calls that avoids O(n²) string concatenation.
+/// Use `ToolCallAccumulator` for streaming, then call `finalize()` to get the final JSON.
+#[derive(Default)]
+pub struct ToolCallAccumulator {
+    pub entries: Vec<ToolCallEntry>,
+}
+
+#[derive(Default)]
+pub struct ToolCallEntry {
+    pub id: Option<String>,
+    pub tool_type: Option<String>,
+    pub name: String,
+    pub arguments: String, // Mutable String for efficient append
+    pub index: usize,
+    pub initialized: bool, // Track if this entry received meaningful data
+    pub extra_content: Option<serde_json::Value>, // Gemini thought_signature etc.
+} + +impl ToolCallAccumulator { + pub fn merge(&mut self, new_tc: &serde_json::Value) { + let index = new_tc + .get("index") + .and_then(|i| { + i.as_u64() + .or_else(|| i.as_str().and_then(|s| s.parse().ok())) + }) + .unwrap_or(0) as usize; + + // Prevent memory DoS from huge indices + if index >= MAX_TOOL_CALLS { + tracing::warn!("Tool call index {} exceeds maximum {}, ignoring", index, MAX_TOOL_CALLS); + return; + } + + while self.entries.len() <= index { + self.entries.push(ToolCallEntry { + index: self.entries.len(), + ..Default::default() + }); + } + + let entry = &mut self.entries[index]; + + // Track if we received meaningful data (not just an empty delta) + let mut has_meaningful_data = false; + + if let Some(id) = new_tc.get("id").and_then(|v| v.as_str()) { + if !id.is_empty() { + entry.id = Some(id.to_string()); + has_meaningful_data = true; + } + } + + if let Some(t) = new_tc.get("type").and_then(|v| v.as_str()) { + entry.tool_type = Some(t.to_string()); + has_meaningful_data = true; + } + + if let Some(func) = new_tc.get("function") { + if let Some(name) = func.get("name").and_then(|v| v.as_str()) { + if !name.is_empty() { + entry.name = name.to_string(); + has_meaningful_data = true; + } + } + + if let Some(args) = func.get("arguments") { + if !args.is_null() { + // O(1) amortized append to String - avoid unnecessary allocation + if let Some(s) = args.as_str() { + if !s.is_empty() { + entry.arguments.push_str(s); + has_meaningful_data = true; + } + } else { + let serialized = serde_json::to_string(args).unwrap_or_default(); + if !serialized.is_empty() { + entry.arguments.push_str(&serialized); + has_meaningful_data = true; + } + } + } + } + } + + if let Some(extra) = new_tc.get("extra_content") { + if !extra.is_null() { + entry.extra_content = Some(extra.clone()); + has_meaningful_data = true; + } + } + + // Only mark as initialized if we received meaningful data + if has_meaningful_data { + entry.initialized = true; + } + } + + /// Set the final/complete state of a tool call entry, replacing accumulated arguments. + /// Used by `.done` events that carry the complete data instead of incremental deltas. 
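+    ///
+    /// Minimal usage sketch (hypothetical payload, mirroring the unit tests below):
+    ///
+    /// ```ignore
+    /// let mut acc = ToolCallAccumulator::default();
+    /// acc.merge(&json!({"index": 0, "id": "call_1",
+    ///     "function": {"name": "search", "arguments": "{\"q\""}}));
+    /// acc.merge(&json!({"index": 0, "function": {"arguments": ":\"rust\"}"}}));
+    /// // a `.done` event replaces whatever was accumulated so far:
+    /// acc.set_final(&json!({"index": 0,
+    ///     "function": {"name": "search", "arguments": "{\"q\":\"rust\"}"}}));
+    /// assert_eq!(acc.finalize()[0]["function"]["arguments"], "{\"q\":\"rust\"}");
+    /// ```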
+ pub fn set_final(&mut self, new_tc: &serde_json::Value) { + let index = new_tc + .get("index") + .and_then(|i| { + i.as_u64() + .or_else(|| i.as_str().and_then(|s| s.parse().ok())) + }) + .unwrap_or(0) as usize; + + if index >= MAX_TOOL_CALLS { + tracing::warn!("Tool call index {} exceeds maximum {}, ignoring finalize", index, MAX_TOOL_CALLS); + return; + } + + while self.entries.len() <= index { + self.entries.push(ToolCallEntry { + index: self.entries.len(), + ..Default::default() + }); + } + + let entry = &mut self.entries[index]; + let mut has_meaningful_data = false; + + if let Some(id) = new_tc.get("id").and_then(|v| v.as_str()) { + if !id.is_empty() { + entry.id = Some(id.to_string()); + has_meaningful_data = true; + } + } + + if let Some(t) = new_tc.get("type").and_then(|v| v.as_str()) { + entry.tool_type = Some(t.to_string()); + has_meaningful_data = true; + } + + if let Some(func) = new_tc.get("function") { + if let Some(name) = func.get("name").and_then(|v| v.as_str()) { + if !name.is_empty() { + entry.name = name.to_string(); + has_meaningful_data = true; + } + } + + if let Some(args) = func.get("arguments") { + if !args.is_null() { + let final_args = if let Some(s) = args.as_str() { + s.to_string() + } else { + serde_json::to_string(args).unwrap_or_default() + }; + // Replace unconditionally — this is finalization, even empty is valid + entry.arguments = final_args; + has_meaningful_data = true; + } + } + } + + if let Some(extra) = new_tc.get("extra_content") { + if !extra.is_null() { + entry.extra_content = Some(extra.clone()); + has_meaningful_data = true; + } + } + + if has_meaningful_data { + entry.initialized = true; + } + } + + /// Convert accumulated entries to final JSON format. + /// Filters out uninitialized placeholder entries (phantom tool calls). + /// Uses stable synthetic IDs based on index for entries without real IDs. 
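+    ///
+    /// Each returned element is roughly shaped like this (sketch; field order is not guaranteed):
+    ///
+    /// ```ignore
+    /// {
+    ///   "id": "call_1",              // or "pending_call_<index>" when the provider sent no id
+    ///   "type": "function",          // defaults to "function" when the delta carried no type
+    ///   "index": 0,
+    ///   "function": { "name": "search", "arguments": "{\"q\":\"rust\"}" },
+    ///   "extra_content": { }         // only present when the provider sent it (e.g. Gemini)
+    /// }
+    /// ```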
+ pub fn finalize(&self) -> Vec { + self.entries + .iter() + .filter(|entry| entry.initialized && !entry.name.is_empty()) + .map(|entry| { + // Use stable synthetic ID based on index, not random UUID + let id = entry.id.clone().unwrap_or_else(|| { + format!("pending_call_{}", entry.index) + }); + let mut tc = json!({ + "id": id, + "type": entry.tool_type.as_deref().unwrap_or("function"), + "index": entry.index, + "function": { + "name": entry.name, + "arguments": entry.arguments + } + }); + if let Some(extra) = &entry.extra_content { + tc["extra_content"] = extra.clone(); + } + tc + }) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_accumulator_basic_streaming() { + let mut acc = ToolCallAccumulator::default(); + acc.merge(&json!({ + "index": 0, + "id": "call_123", + "type": "function", + "function": {"name": "test", "arguments": "{\"a\":"} + })); + acc.merge(&json!({ + "index": 0, + "function": {"arguments": " 1}"} + })); + + let result = acc.finalize(); + assert_eq!(result.len(), 1); + assert_eq!(result[0]["id"], "call_123"); + assert_eq!(result[0]["function"]["name"], "test"); + assert_eq!(result[0]["function"]["arguments"], "{\"a\": 1}"); + } + + #[test] + fn test_accumulator_parallel_tool_calls() { + let mut acc = ToolCallAccumulator::default(); + acc.merge(&json!({"index": 0, "id": "call_1", "function": {"name": "func1", "arguments": "{}"}})); + acc.merge(&json!({"index": 1, "id": "call_2", "function": {"name": "func2", "arguments": "{}"}})); + + let result = acc.finalize(); + assert_eq!(result.len(), 2); + assert_eq!(result[0]["function"]["name"], "func1"); + assert_eq!(result[1]["function"]["name"], "func2"); + } + + #[test] + fn test_accumulator_generates_stable_id_if_missing() { + let mut acc = ToolCallAccumulator::default(); + acc.merge(&json!({"index": 0, "function": {"name": "test", "arguments": "{}"}})); + + // Call finalize multiple times - ID should be stable + let result1 = acc.finalize(); + let result2 = acc.finalize(); + let id1 = result1[0]["id"].as_str().unwrap(); + let id2 = result2[0]["id"].as_str().unwrap(); + assert_eq!(id1, id2, "ID should be stable across finalize calls"); + assert_eq!(id1, "pending_call_0", "Should use index-based synthetic ID"); + } + + #[test] + fn test_accumulator_filters_phantom_entries() { + let mut acc = ToolCallAccumulator::default(); + // Tool call arrives with index 2 first - creates placeholders for 0 and 1 + acc.merge(&json!({"index": 2, "id": "call_real", "function": {"name": "real_func", "arguments": "{}"}})); + + let result = acc.finalize(); + // Should only have 1 entry (the real one), not 3 phantom entries + assert_eq!(result.len(), 1, "Should filter out uninitialized placeholder entries"); + assert_eq!(result[0]["id"], "call_real"); + assert_eq!(result[0]["function"]["name"], "real_func"); + assert_eq!(result[0]["index"], 2); + } + + #[test] + fn test_accumulator_large_arguments_efficient() { + let mut acc = ToolCallAccumulator::default(); + acc.merge(&json!({"index": 0, "id": "call_1", "function": {"name": "test", "arguments": ""}})); + + // Simulate streaming many small chunks (would be O(n²) with naive concat) + for i in 0..1000 { + acc.merge(&json!({"index": 0, "function": {"arguments": format!("{},", i)}})); + } + + let result = acc.finalize(); + let args = result[0]["function"]["arguments"].as_str().unwrap(); + assert!(args.starts_with("0,1,2,")); + assert!(args.len() > 3000); // Should have all the numbers + } + + #[test] + fn test_accumulator_rejects_huge_index() { + let mut acc = 
ToolCallAccumulator::default(); + // Try to create a tool call with a huge index (memory DoS attempt) + acc.merge(&json!({"index": 1000000, "id": "call_huge", "function": {"name": "bad", "arguments": "{}"}})); + + // Should be ignored - no entries created + let result = acc.finalize(); + assert!(result.is_empty(), "Huge index should be rejected"); + } + + #[test] + fn test_accumulator_accepts_max_valid_index() { + let mut acc = ToolCallAccumulator::default(); + // Index 127 should be accepted (MAX_TOOL_CALLS = 128) + acc.merge(&json!({"index": 127, "id": "call_max", "function": {"name": "valid", "arguments": "{}"}})); + + let result = acc.finalize(); + assert_eq!(result.len(), 1); + assert_eq!(result[0]["id"], "call_max"); + } + + #[test] + fn test_accumulator_ignores_empty_delta() { + let mut acc = ToolCallAccumulator::default(); + // Empty delta with just index - should not mark as initialized + acc.merge(&json!({"index": 0})); + + let result = acc.finalize(); + assert!(result.is_empty(), "Empty delta should not create initialized entry"); + } + + #[test] + fn test_set_final_replaces_accumulated_arguments() { + let mut acc = ToolCallAccumulator::default(); + // Simulate streaming deltas + acc.merge(&json!({ + "index": 0, + "id": "call_123", + "type": "function", + "function": {"name": "get_weather", "arguments": "{\"loc"} + })); + acc.merge(&json!({ + "index": 0, + "function": {"arguments": "ation\":"} + })); + acc.merge(&json!({ + "index": 0, + "function": {"arguments": "\"Paris\"}"} + })); + + // Now finalize with complete arguments (should replace, not append) + acc.set_final(&json!({ + "index": 0, + "type": "function", + "function": {"name": "get_weather", "arguments": "{\"location\":\"Paris\"}"} + })); + + let result = acc.finalize(); + assert_eq!(result.len(), 1); + assert_eq!(result[0]["function"]["arguments"], "{\"location\":\"Paris\"}"); + assert_eq!(result[0]["id"], "call_123"); + } + + #[test] + fn test_set_final_without_prior_deltas() { + let mut acc = ToolCallAccumulator::default(); + acc.set_final(&json!({ + "index": 0, + "id": "call_456", + "type": "function", + "function": {"name": "search", "arguments": "{\"q\":\"test\"}"} + })); + + let result = acc.finalize(); + assert_eq!(result.len(), 1); + assert_eq!(result[0]["id"], "call_456"); + assert_eq!(result[0]["function"]["name"], "search"); + assert_eq!(result[0]["function"]["arguments"], "{\"q\":\"test\"}"); + } + + #[test] + fn test_accumulator_filters_empty_name_with_arguments() { + let mut acc = ToolCallAccumulator::default(); + acc.merge(&json!({"index": 0, "id": "call_123", "function": {"arguments": "{\"q\":\"test\"}"}})); + + let result = acc.finalize(); + assert!(result.is_empty(), "Tool call with empty name should be filtered out"); + } + + #[test] + fn test_accumulator_empty_strings_not_meaningful() { + let mut acc = ToolCallAccumulator::default(); + // Delta with empty strings - should not mark as initialized + acc.merge(&json!({"index": 0, "id": "", "function": {"name": "", "arguments": ""}})); + + let result = acc.finalize(); + assert!(result.is_empty(), "Empty strings should not create initialized entry"); + } +} diff --git a/refact-agent/engine/src/chat/prepare.rs b/refact-agent/engine/src/chat/prepare.rs new file mode 100644 index 000000000..16d81c148 --- /dev/null +++ b/refact-agent/engine/src/chat/prepare.rs @@ -0,0 +1,744 @@ +use std::sync::Arc; +use std::collections::HashSet; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use tokio::sync::{Mutex as AMutex, RwLock as ARwLock}; + +use 
crate::at_commands::at_commands::AtCommandsContext;
+use crate::at_commands::execute_at::run_at_commands_locally;
+use crate::call_validation::{ChatMessage, ChatMeta, ReasoningEffort, SamplingParameters};
+use crate::caps::{resolve_chat_model, ChatModelRecord};
+use crate::global_context::GlobalContext;
+use crate::llm::{LlmRequest, CanonicalToolChoice, CommonParams, ReasoningIntent, WireFormat};
+use crate::llm::params::CacheControl;
+use crate::scratchpad_abstract::HasTokenizerAndEot;
+use crate::scratchpads::scratchpad_utils::HasRagResults;
+use crate::tools::tools_description::ToolDesc;
+use super::tools::execute_tools;
+use super::types::ThreadParams;
+
+use super::history_limit::fix_and_limit_messages_history;
+use super::prompts::prepend_the_right_system_prompt_and_maybe_more_initial_messages;
+use super::config::tokens;
+
+fn responses_stateful_tail(messages: Vec<ChatMessage>) -> Vec<ChatMessage> {
+    // For stateful Responses API (previous_response_id), we should send only *new* items.
+    // In our chat representation, those are the messages *after* the last assistant message
+    // (tool outputs, context_files, new user message, etc.).
+    if let Some(last_asst) = messages.iter().rposition(|m| m.role == "assistant") {
+        if last_asst + 1 < messages.len() {
+            return messages[last_asst + 1..].to_vec();
+        }
+        return vec![];
+    }
+    // If we don't have an assistant message yet, keep whatever we have (first turn).
+    messages
+}
+
+fn last_system_message(messages: &[ChatMessage]) -> Option<ChatMessage> {
+    messages
+        .iter()
+        .rev()
+        .find(|m| m.role == "system")
+        .cloned()
+}
+
+pub struct PreparedChat {
+    pub llm_request: LlmRequest,
+    pub limited_messages: Vec<ChatMessage>,
+    pub rag_results: Vec<Value>,
+}
+
+pub struct ChatPrepareOptions {
+    pub prepend_system_prompt: bool,
+    pub allow_at_commands: bool,
+    pub allow_tool_prerun: bool,
+    pub supports_tools: bool,
+    pub tool_choice: Option<ToolChoice>,
+    pub parallel_tool_calls: Option<bool>,
+    pub cache_control: CacheControl,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(tag = "type", rename_all = "lowercase")]
+pub enum ToolChoice {
+    Auto,
+    None,
+    Required,
+    #[serde(rename = "function")]
+    Function {
+        name: String,
+    },
+}
+
+impl Default for ChatPrepareOptions {
+    fn default() -> Self {
+        Self {
+            prepend_system_prompt: true,
+            allow_at_commands: true,
+            allow_tool_prerun: true,
+            supports_tools: true,
+            tool_choice: None,
+            parallel_tool_calls: None,
+            cache_control: CacheControl::Off,
+        }
+    }
+}
+
+pub async fn prepare_chat_passthrough(
+    gcx: Arc<ARwLock<GlobalContext>>,
+    ccx: Arc<AMutex<AtCommandsContext>>,
+    t: &HasTokenizerAndEot,
+    messages: Vec<ChatMessage>,
+    thread: &ThreadParams,
+    model_id: &str,
+    mode_id: &str,
+    tools: Vec<ToolDesc>,
+    meta: &ChatMeta,
+    sampling_parameters: &mut SamplingParameters,
+    options: &ChatPrepareOptions,
+) -> Result<PreparedChat, String> {
+    let mut has_rag_results = HasRagResults::new();
+    let tool_names: HashSet<String> = tools.iter().map(|x| x.name.clone()).collect();
+
+    // 1. Resolve model early to get reasoning params before history limiting
+    let caps = crate::global_context::try_load_caps_quickly_if_not_present(gcx.clone(), 0)
+        .await
+        .map_err(|e| e.message)?;
+    let model_record = resolve_chat_model(caps, model_id)?;
+
+    let model_n_ctx = if model_record.base.n_ctx > 0 {
+        model_record.base.n_ctx
+    } else {
+        tokens().default_n_ctx
+    };
+    let effective_n_ctx = if let Some(cap) = meta.context_tokens_cap {
+        if cap == 0 {
+            model_n_ctx
+        } else {
+            cap.min(model_n_ctx)
+        }
+    } else {
+        model_n_ctx
+    };
+
+    // 2. 
Adapt sampling parameters for reasoning models BEFORE history limiting + adapt_sampling_for_reasoning_models(sampling_parameters, &model_record); + + // 3. System prompt injection (decoupled from allow_at_commands) + let prompt_tool_names = if options.allow_at_commands { + tool_names.clone() + } else { + HashSet::new() + }; + let task_meta = ccx.lock().await.task_meta.clone(); + let messages = if options.prepend_system_prompt { + prepend_the_right_system_prompt_and_maybe_more_initial_messages( + gcx.clone(), + messages, + meta, + &task_meta, + &mut has_rag_results, + prompt_tool_names, + mode_id, + model_id, + ) + .await + } else { + messages + }; + + // 4. Run @-commands + let (mut messages, _) = if options.allow_at_commands { + run_at_commands_locally( + ccx.clone(), + t.tokenizer.clone(), + sampling_parameters.max_new_tokens, + messages, + &mut has_rag_results, + ) + .await + } else { + (messages, false) + }; + + // 5. Tool prerun - restricted to allowed tools only + // Safety: Only execute tool calls from the last message if: + // - It's an assistant message with pending tool calls + // - The tool calls have not been answered yet (no subsequent tool result messages) + // This prevents executing tools from injected/external assistant messages. + if options.supports_tools && options.allow_tool_prerun { + if let Some(last_msg) = messages.last() { + if last_msg.role == "assistant" { + if let Some(ref tool_calls) = last_msg.tool_calls { + // Verify these tool calls are pending (no tool results exist for them) + let pending_call_ids: HashSet = tool_calls + .iter() + .map(|tc| tc.id.clone()) + .collect(); + let answered_call_ids: HashSet = messages + .iter() + .filter(|m| m.role == "tool" || m.role == "diff") + .map(|m| m.tool_call_id.clone()) + .collect(); + let unanswered_calls: Vec<_> = tool_calls + .iter() + .filter(|tc| !answered_call_ids.contains(&tc.id)) + .filter(|tc| tool_names.contains(&tc.function.name)) + .cloned() + .collect(); + + if !unanswered_calls.is_empty() && pending_call_ids.len() == unanswered_calls.len() + answered_call_ids.iter().filter(|id| pending_call_ids.contains(*id)).count() { + let mut prerun_thread = thread.clone(); + prerun_thread.context_tokens_cap = Some(effective_n_ctx); + prerun_thread.model = model_id.to_string(); + let (tool_results, _) = execute_tools( + gcx.clone(), + &unanswered_calls, + &messages, + &prerun_thread, + "agent", + Some(&prerun_thread.model), + super::tools::ExecuteToolsOptions::default(), + ) + .await; + messages.extend(tool_results); + } + } + } + } + } + + // 6. Build tools list + let filtered_tools: Vec = if options.supports_tools { + tools + .iter() + .filter(|x| x.is_supported_by(model_id)) + .cloned() + .collect() + } else { + vec![] + }; + let strict_tools = model_record.supports_strict_tools; + let openai_tools: Vec = filtered_tools + .iter() + .map(|tool| tool.clone().into_openai_style(strict_tools)) + .collect(); + + // 7. History validation and fixing + let limited_msgs = fix_and_limit_messages_history(&messages, sampling_parameters)?; + + // 8. Strip thinking blocks if thinking is disabled + let limited_adapted_msgs = + strip_thinking_blocks_if_disabled(limited_msgs, sampling_parameters, &model_record); + + // 9. Linearize thread: merge consecutive user-like messages for cache-friendly + // strict role alternation (system/user/assistant/user/assistant/...) 
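+    // Illustrative sketch of the effect (hypothetical roles, not from a real trajectory):
+    //   before: [system, context_file, context_file, user, assistant, tool, context_file, user]
+    //   after:  [system, user(cf+cf+user), assistant, tool(+cf), user]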
+ let mut linearized_msgs = super::linearize::linearize_thread_for_llm(&limited_adapted_msgs); + + // OpenAI Responses API stateful multi-turn: when we chain with previous_response_id, + // we should send only the new tail items (tool outputs and/or new user message). + if model_record.base.wire_format == WireFormat::OpenaiResponses + && thread.previous_response_id.as_ref().is_some_and(|s| !s.is_empty()) + { + let tail = responses_stateful_tail(linearized_msgs.clone()); + let mut stitched = Vec::new(); + if let Some(sys) = last_system_message(&limited_adapted_msgs) { + stitched.push(sys); + } + stitched.extend(tail); + linearized_msgs = stitched; + } + + // 10. Build LlmRequest + // Enforce n=1 for chat - multi-choice not supported in streaming accumulation + let common_params = CommonParams { + n_ctx: Some(effective_n_ctx), + max_tokens: sampling_parameters.max_new_tokens, + temperature: sampling_parameters.temperature, + frequency_penalty: sampling_parameters.frequency_penalty, + stop: sampling_parameters.stop.clone(), + n: Some(1), + }; + + let reasoning = sampling_params_to_reasoning_intent(sampling_parameters, &model_record); + + let tool_choice = options.tool_choice.as_ref().map(|tc| match tc { + ToolChoice::Auto => CanonicalToolChoice::Auto, + ToolChoice::None => CanonicalToolChoice::None, + ToolChoice::Required => CanonicalToolChoice::Required, + ToolChoice::Function { name } => CanonicalToolChoice::Function { name: name.clone() }, + }); + + let mut llm_request = LlmRequest::new(model_id.to_string(), linearized_msgs.clone()) + .with_params(common_params) + .with_tools(openai_tools, tool_choice) + .with_reasoning(reasoning) + .with_parallel_tool_calls(options.parallel_tool_calls.unwrap_or(false)) + .with_cache_control(options.cache_control); + + if model_record.base.wire_format == WireFormat::OpenaiResponses { + llm_request = llm_request.with_previous_response_id(thread.previous_response_id.clone()); + } + + // Add meta for Refact cloud when support_metadata is enabled + if model_record.base.support_metadata { + llm_request = llm_request.with_meta(meta.clone()); + } + + if model_record.base.id.starts_with("openrouter/") && !model_record.available_providers.is_empty() { + if let Some(selected_provider) = model_record.selected_provider.as_ref() { + let mut extra_body = llm_request.extra_body.unwrap_or_default(); + extra_body.insert( + "provider".to_string(), + serde_json::json!({"order": [selected_provider]}), + ); + llm_request.extra_body = Some(extra_body); + } + } + + Ok(PreparedChat { + llm_request, + limited_messages: linearized_msgs, + rag_results: has_rag_results.in_json, + }) +} + +fn adapt_sampling_for_reasoning_models( + sampling_parameters: &mut SamplingParameters, + model_record: &ChatModelRecord, +) { + let user_set_max_tokens = sampling_parameters.max_new_tokens > 0; + + if !user_set_max_tokens { + sampling_parameters.max_new_tokens = model_record.default_max_tokens + .or(model_record.max_output_tokens) + .unwrap_or(4096); + } + + if let Some(max_output) = model_record.max_output_tokens { + if sampling_parameters.max_new_tokens > max_output { + sampling_parameters.max_new_tokens = max_output; + } + } + + if sampling_parameters.temperature.is_none() { + sampling_parameters.temperature = model_record.default_temperature; + } + + if sampling_parameters.frequency_penalty.is_none() { + sampling_parameters.frequency_penalty = model_record.default_frequency_penalty; + } + + let has_reasoning_support = model_record.reasoning_effort_options.is_some() + || 
model_record.supports_thinking_budget + || model_record.supports_adaptive_thinking_budget; + + if !has_reasoning_support { + sampling_parameters.reasoning_effort = None; + sampling_parameters.thinking = None; + sampling_parameters.thinking_budget = None; + sampling_parameters.enable_thinking = None; + return; + } + + if sampling_parameters.boost_reasoning { + if model_record.supports_thinking_budget && sampling_parameters.thinking_budget.is_none() { + let min_budget = tokens().min_budget_tokens; + let budget = if sampling_parameters.max_new_tokens > min_budget { + (sampling_parameters.max_new_tokens / 2).max(min_budget) + } else { + min_budget + }; + sampling_parameters.thinking_budget = Some(budget); + } + + if let Some(ref options) = model_record.reasoning_effort_options { + if sampling_parameters.reasoning_effort.is_none() && !options.is_empty() { + let default_effort = if options.contains(&"medium".to_string()) { + ReasoningEffort::Medium + } else { + ReasoningEffort::from_str_opt(&options[options.len() - 1]) + .unwrap_or(ReasoningEffort::Medium) + }; + sampling_parameters.reasoning_effort = Some(default_effort); + } + } + } + + if model_record.reasoning_effort_options.is_none() { + sampling_parameters.reasoning_effort = None; + } + if !model_record.supports_thinking_budget && !model_record.supports_adaptive_thinking_budget { + sampling_parameters.thinking_budget = None; + } + sampling_parameters.thinking = None; + sampling_parameters.enable_thinking = None; +} + +fn sampling_params_to_reasoning_intent( + sampling_parameters: &SamplingParameters, + model_record: &ChatModelRecord, +) -> ReasoningIntent { + let has_reasoning_support = model_record.reasoning_effort_options.is_some() + || model_record.supports_thinking_budget + || model_record.supports_adaptive_thinking_budget; + + if !has_reasoning_support { + return ReasoningIntent::Off; + } + + if let Some(budget) = sampling_parameters.thinking_budget { + return ReasoningIntent::BudgetTokens(budget); + } + + if let Some(ref effort) = sampling_parameters.reasoning_effort { + return match effort { + ReasoningEffort::NoReasoning => ReasoningIntent::NoReasoning, + ReasoningEffort::Minimal => ReasoningIntent::Minimal, + ReasoningEffort::Low => ReasoningIntent::Low, + ReasoningEffort::Medium => ReasoningIntent::Medium, + ReasoningEffort::High => ReasoningIntent::High, + ReasoningEffort::XHigh => ReasoningIntent::XHigh, + ReasoningEffort::Max => ReasoningIntent::Max, + }; + } + + if let Some(ref thinking) = sampling_parameters.thinking { + if thinking.get("type").and_then(|t| t.as_str()) == Some("enabled") { + if let Some(budget) = thinking.get("budget_tokens").and_then(|b| b.as_u64()) { + return ReasoningIntent::BudgetTokens(budget as usize); + } + return ReasoningIntent::Medium; + } + } + + if sampling_parameters.enable_thinking == Some(true) { + return ReasoningIntent::Medium; + } + + if sampling_parameters.boost_reasoning { + return ReasoningIntent::Medium; + } + + ReasoningIntent::Off +} + +fn is_thinking_enabled(sampling_parameters: &SamplingParameters) -> bool { + sampling_parameters + .thinking + .as_ref() + .and_then(|t| t.get("type")) + .and_then(|t| t.as_str()) + .map(|t| t == "enabled") + .unwrap_or(false) + || sampling_parameters.reasoning_effort.is_some() + || sampling_parameters.thinking_budget.is_some() + || sampling_parameters.enable_thinking == Some(true) +} + +fn strip_thinking_blocks_if_disabled( + messages: Vec, + sampling_parameters: &SamplingParameters, + model_record: &ChatModelRecord, +) -> Vec { + let has_reasoning = 
model_record.reasoning_effort_options.is_some() + || model_record.supports_thinking_budget + || model_record.supports_adaptive_thinking_budget; + if !has_reasoning || !is_thinking_enabled(sampling_parameters) { + messages + .into_iter() + .map(|mut msg| { + msg.thinking_blocks = None; + msg.reasoning_content = None; + msg + }) + .collect() + } else { + messages + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::call_validation::ChatContent; + + fn make_model_record_effort(effort_options: Option>) -> ChatModelRecord { + ChatModelRecord { + base: Default::default(), + default_temperature: Some(0.7), + reasoning_effort_options: effort_options.map(|opts| opts.into_iter().map(|s| s.to_string()).collect()), + ..Default::default() + } + } + + fn make_model_record_thinking_budget() -> ChatModelRecord { + ChatModelRecord { + base: Default::default(), + default_temperature: Some(0.7), + supports_thinking_budget: true, + ..Default::default() + } + } + + fn make_model_record_adaptive() -> ChatModelRecord { + ChatModelRecord { + base: Default::default(), + default_temperature: Some(0.7), + supports_adaptive_thinking_budget: true, + reasoning_effort_options: Some(vec!["low".to_string(), "medium".to_string(), "high".to_string(), "max".to_string()]), + ..Default::default() + } + } + + fn make_model_record_no_reasoning() -> ChatModelRecord { + ChatModelRecord { + base: Default::default(), + default_temperature: Some(0.7), + ..Default::default() + } + } + + fn make_sampling_params() -> SamplingParameters { + SamplingParameters { + max_new_tokens: 4096, + temperature: Some(1.0), + reasoning_effort: None, + thinking: None, + enable_thinking: None, + boost_reasoning: false, + ..Default::default() + } + } + + #[test] + fn test_is_thinking_enabled_with_thinking_json() { + let mut params = make_sampling_params(); + params.thinking = Some(serde_json::json!({"type": "enabled", "budget_tokens": 1024})); + assert!(is_thinking_enabled(¶ms)); + } + + #[test] + fn test_is_thinking_enabled_with_thinking_disabled() { + let mut params = make_sampling_params(); + params.thinking = Some(serde_json::json!({"type": "disabled"})); + assert!(!is_thinking_enabled(¶ms)); + } + + #[test] + fn test_is_thinking_enabled_with_reasoning_effort() { + let mut params = make_sampling_params(); + params.reasoning_effort = Some(ReasoningEffort::Medium); + assert!(is_thinking_enabled(¶ms)); + } + + #[test] + fn test_is_thinking_enabled_with_enable_thinking_true() { + let mut params = make_sampling_params(); + params.enable_thinking = Some(true); + assert!(is_thinking_enabled(¶ms)); + } + + #[test] + fn test_is_thinking_enabled_with_enable_thinking_false() { + let mut params = make_sampling_params(); + params.enable_thinking = Some(false); + assert!(!is_thinking_enabled(¶ms)); + } + + #[test] + fn test_is_thinking_enabled_all_none() { + let params = make_sampling_params(); + assert!(!is_thinking_enabled(¶ms)); + } + + #[test] + fn test_strip_thinking_blocks_when_no_reasoning_support() { + let model = make_model_record_no_reasoning(); + let params = make_sampling_params(); + let msgs = vec![ChatMessage { + thinking_blocks: Some(vec![serde_json::json!({"type": "thinking"})]), + content: ChatContent::SimpleText("hello".into()), + ..Default::default() + }]; + let result = strip_thinking_blocks_if_disabled(msgs, ¶ms, &model); + assert!(result[0].thinking_blocks.is_none()); + } + + #[test] + fn test_strip_thinking_blocks_when_thinking_disabled() { + let model = make_model_record_thinking_budget(); + let params = make_sampling_params(); 
+ let msgs = vec![ChatMessage { + thinking_blocks: Some(vec![serde_json::json!({"type": "thinking"})]), + content: ChatContent::SimpleText("hello".into()), + ..Default::default() + }]; + let result = strip_thinking_blocks_if_disabled(msgs, ¶ms, &model); + assert!(result[0].thinking_blocks.is_none()); + } + + #[test] + fn test_strip_thinking_blocks_preserves_when_enabled() { + let model = make_model_record_thinking_budget(); + let mut params = make_sampling_params(); + params.thinking = Some(serde_json::json!({"type": "enabled", "budget_tokens": 1024})); + let msgs = vec![ChatMessage { + thinking_blocks: Some(vec![serde_json::json!({"type": "thinking"})]), + content: ChatContent::SimpleText("hello".into()), + ..Default::default() + }]; + let result = strip_thinking_blocks_if_disabled(msgs, ¶ms, &model); + assert!(result[0].thinking_blocks.is_some()); + } + + #[test] + fn test_strip_thinking_blocks_preserves_other_fields() { + let model = make_model_record_no_reasoning(); + let params = make_sampling_params(); + let msgs = vec![ChatMessage { + role: "assistant".into(), + content: ChatContent::SimpleText("hello".into()), + reasoning_content: Some("reasoning".into()), + thinking_blocks: Some(vec![serde_json::json!({"type": "thinking"})]), + citations: vec![serde_json::json!({"url": "http://x"})], + ..Default::default() + }]; + let result = strip_thinking_blocks_if_disabled(msgs, ¶ms, &model); + assert_eq!(result[0].role, "assistant"); + assert_eq!(result[0].reasoning_content, None); + assert_eq!(result[0].citations.len(), 1); + assert!(result[0].thinking_blocks.is_none()); + } + + #[test] + fn test_adapt_sampling_effort_boost_reasoning() { + let mut params = make_sampling_params(); + params.boost_reasoning = true; + params.temperature = None; + let model = make_model_record_effort(Some(vec!["low", "medium", "high"])); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert_eq!(params.reasoning_effort, Some(ReasoningEffort::Medium)); + assert_eq!(params.temperature, Some(0.7)); + } + + #[test] + fn test_adapt_sampling_effort_preserves_user_temperature() { + let mut params = make_sampling_params(); + params.boost_reasoning = true; + params.temperature = Some(0.3); + let model = make_model_record_effort(Some(vec!["low", "medium", "high"])); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert_eq!(params.reasoning_effort, Some(ReasoningEffort::Medium)); + assert_eq!(params.temperature, Some(0.3)); + } + + #[test] + fn test_adapt_sampling_effort_takes_precedence() { + let mut params = make_sampling_params(); + params.boost_reasoning = true; + params.reasoning_effort = Some(ReasoningEffort::High); + let model = make_model_record_effort(Some(vec!["low", "medium", "high"])); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert_eq!(params.reasoning_effort, Some(ReasoningEffort::High)); + } + + #[test] + fn test_adapt_sampling_thinking_budget_boost_reasoning() { + let mut params = make_sampling_params(); + params.boost_reasoning = true; + params.max_new_tokens = 4096; + let model = make_model_record_thinking_budget(); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert!(params.thinking_budget.is_some()); + assert!(params.thinking_budget.unwrap() > 0); + assert!(params.reasoning_effort.is_none()); + assert!(params.thinking.is_none()); + assert!(params.enable_thinking.is_none()); + } + + #[test] + fn test_adapt_sampling_thinking_budget_explicit_preserved() { + let mut params = make_sampling_params(); + params.thinking_budget = Some(5000); + let 
model = make_model_record_thinking_budget(); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert_eq!(params.thinking_budget, Some(5000)); + assert!(params.reasoning_effort.is_none()); + assert!(params.thinking.is_none()); + } + + #[test] + fn test_adapt_sampling_thinking_budget_no_boost_no_budget() { + let mut params = make_sampling_params(); + let model = make_model_record_thinking_budget(); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert!(params.thinking_budget.is_none()); + assert!(params.reasoning_effort.is_none()); + } + + #[test] + fn test_adapt_sampling_adaptive_boost_reasoning() { + let mut params = make_sampling_params(); + params.boost_reasoning = true; + let model = make_model_record_adaptive(); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert_eq!(params.reasoning_effort, Some(ReasoningEffort::Medium)); + assert!(params.thinking.is_none()); + assert!(params.enable_thinking.is_none()); + } + + #[test] + fn test_adapt_sampling_adaptive_preserves_reasoning_effort() { + let mut params = make_sampling_params(); + params.reasoning_effort = Some(ReasoningEffort::High); + let model = make_model_record_adaptive(); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert_eq!(params.reasoning_effort, Some(ReasoningEffort::High)); + } + + #[test] + fn test_adapt_sampling_no_reasoning_clears_all() { + let mut params = make_sampling_params(); + params.reasoning_effort = Some(ReasoningEffort::High); + params.thinking = Some(serde_json::json!({"type": "enabled"})); + params.enable_thinking = Some(true); + let model = make_model_record_no_reasoning(); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert!(params.reasoning_effort.is_none()); + assert!(params.thinking.is_none()); + assert!(params.enable_thinking.is_none()); + } + + #[test] + fn test_adapt_sampling_effort_default_to_last_option() { + let mut params = make_sampling_params(); + params.boost_reasoning = true; + let model = make_model_record_effort(Some(vec!["low", "high"])); + adapt_sampling_for_reasoning_models(&mut params, &model); + assert_eq!(params.reasoning_effort, Some(ReasoningEffort::High)); + } + + #[test] + fn test_no_reasoning_intent_for_no_support() { + let model = make_model_record_no_reasoning(); + let mut params = make_sampling_params(); + params.boost_reasoning = true; + let intent = sampling_params_to_reasoning_intent(¶ms, &model); + assert_eq!(intent, ReasoningIntent::Off); + + params.reasoning_effort = Some(ReasoningEffort::High); + let intent = sampling_params_to_reasoning_intent(¶ms, &model); + assert_eq!(intent, ReasoningIntent::Off); + } + + #[test] + fn test_chat_prepare_options_default() { + let opts = ChatPrepareOptions::default(); + assert!(opts.prepend_system_prompt); + assert!(opts.allow_at_commands); + assert!(opts.allow_tool_prerun); + assert!(opts.supports_tools); + } +} diff --git a/refact-agent/engine/src/chat/prompt_snippets.rs b/refact-agent/engine/src/chat/prompt_snippets.rs new file mode 100644 index 000000000..b03b73583 --- /dev/null +++ b/refact-agent/engine/src/chat/prompt_snippets.rs @@ -0,0 +1,64 @@ +// todo agent: get rid of these, integrate directly to mode prompts + +pub const CD_INSTRUCTIONS: &str = r#"You might receive additional instructions that start with 💿. Those are not coming from the user, they are programmed to help you operate +well and they are always in English. 
Answer in the language the user asked the question in."#;
+
+pub const SHELL_INSTRUCTIONS: &str = r#"When running on the user's laptop, you most likely have the shell() tool. It's for one-time dependency installations, or doing whatever
+the user is asking you to do. Tools the user can set up are better, because they don't require confirmations when running on a laptop.
+When doing something for the project using the shell() tool, offer to create a cmdline_* tool for the user after you have successfully run
+the shell() call. But double-check that it doesn't already exist, and that it is actually typical for this kind of project. You can offer
+this by writing:
+
+🧩SETTINGS:cmdline_cargo_check
+
+from a new line, which will open (when clicked) a wizard that creates a `cargo check` (in this example) command line tool.
+
+service_* tools work in a similar way. The difference is that cmdline_* is designed for non-interactive blocking commands that immediately
+return text in stdout/stderr, while service_* is designed for blocking commands that run in the background, such as a hypercorn server that runs forever until you hit Ctrl+C.
+Here is another example:
+
+🧩SETTINGS:service_hypercorn"#;
+
+pub const AGENT_EXPLORATION_INSTRUCTIONS: &str = r#"2. **Delegate exploration to subagent()**:
+- "Find all usages of symbol X" → subagent with search_symbol_usages, cat, knowledge
+- "Understand how module Y works" → subagent with cat, tree, search_pattern, knowledge
+- "Find files matching pattern Z" → subagent with search_pattern, tree
+- "Trace data flow from A to B" → subagent with search_symbol_definition, cat, knowledge
+- "Find the usage of a lib on the web" → subagent with web, knowledge
+- "Find similar past work" → subagent with search_trajectories, get_trajectory_context
+- "Check project knowledge" → subagent with knowledge
+
+**Tools available for subagents**:
+- `tree()` - project structure; add `use_ast=true` for symbols
+- `cat()` - read files; supports line ranges like `file.rs:10-50`
+- `search_symbol_definition()` - trace code flow
+- `search_pattern()` - regex search across file names and contents
+- `search_semantic()` - conceptual/similarity matches
+- `web()`, `web_search()` - external documentation
+- `knowledge()` - search project knowledge base
+- `search_trajectories()` - find relevant past conversations
+- `get_trajectory_context()` - retrieve messages from a trajectory
+
+**For complex analysis**: delegate to `strategic_planning()`, which automatically gathers relevant files"#;
+
+pub const AGENT_EXECUTION_INSTRUCTIONS: &str = r#"3. Plan (when needed)
+ - **Trivial changes** (typo, one-liner): do it yourself or delegate to a single subagent
+ - **Clear changes**: briefly state what you'll do, then delegate implementation to a subagent
+ - **Significant changes**: post a bullet-point summary, ask "Does this look right?", then delegate
+ - **Multi-file changes**: spawn parallel subagents for independent file updates
+
+4. Implement without Delegation
+ - Do not delegate file modifications to subagents
+ - Execute the plan yourself
+
+5. 
Validate via Delegation + - Delegate test runs: `subagent(task="Run tests and report failures", tools="shell,cat")` + - For significant changes, run `code_review()` to check for bugs, missing tests, and code quality issues + - Review results and decide on next steps + - Iterate until green or explain the blocker to user"#; + +pub const AGENT_EXECUTION_INSTRUCTIONS_NO_TOOLS: &str = r#" - Propose the changes to the user + - the suspected root cause + - the exact files/functions to modify or create + - the new or updated tests to add + - the expected outcome and success criteria"#; diff --git a/refact-agent/engine/src/chat/prompts.rs b/refact-agent/engine/src/chat/prompts.rs new file mode 100644 index 000000000..6d01c6f14 --- /dev/null +++ b/refact-agent/engine/src/chat/prompts.rs @@ -0,0 +1,680 @@ +use std::collections::HashSet; +use std::fs; +use std::sync::Arc; +use std::path::PathBuf; +use tokio::sync::RwLock as ARwLock; + +use crate::call_validation; +use crate::files_correction::get_project_dirs; +use crate::global_context::GlobalContext; +use crate::http::http_post_json; +use crate::http::routers::v1::system_prompt::{PrependSystemPromptPost, PrependSystemPromptResponse}; +use crate::integrations::docker::docker_container_manager::docker_container_get_host_lsp_port_to_connect; +use crate::scratchpads::scratchpad_utils::HasRagResults; +use super::system_context::{ + self, create_instruction_files_message, create_memories_message, gather_system_context, + generate_git_info_prompt, gather_git_info, PROJECT_CONTEXT_MARKER, +}; +use crate::yaml_configs::project_information::load_project_information_config; +use crate::call_validation::{ChatMessage, ChatContent, ContextFile, canonical_mode_id}; +use crate::tasks::storage::infer_task_id_from_chat_id; +use crate::tools::tool_task_memory::load_task_memories; +use crate::yaml_configs::customization_registry::{get_mode_config, map_legacy_mode_to_id}; + +pub async fn get_mode_system_prompt( + gcx: Arc>, + mode_id: &str, + model_id: Option<&str>, +) -> String { + let mode_id = map_legacy_mode_to_id(mode_id); + + match get_mode_config(gcx, mode_id, model_id).await { + Some(mode_config) => mode_config.prompt, + None => { + tracing::warn!("Mode '{}' not found, using empty prompt", mode_id); + String::new() + } + } +} + +async fn _workspace_info(workspace_dirs: &[String], active_file_path: &Option) -> String { + async fn get_vcs_info(detect_vcs_at: &PathBuf) -> String { + let mut info = String::new(); + if let Some((vcs_path, vcs_type)) = + crate::files_in_workspace::detect_vcs_for_a_file_path(detect_vcs_at).await + { + info.push_str(&format!( + "\nThe project is under {} version control, located at:\n{}", + vcs_type, + vcs_path.display() + )); + } else { + info.push_str("\nThere's no version control detected, complain to user if they want to use anything git/hg/svn/etc."); + } + info + } + let mut info = String::new(); + if !workspace_dirs.is_empty() { + info.push_str(&format!( + "The current IDE workspace has these project directories:\n{}", + workspace_dirs.join("\n") + )); + } + let detect_vcs_at_option = active_file_path + .clone() + .or_else(|| workspace_dirs.get(0).map(PathBuf::from)); + if let Some(detect_vcs_at) = detect_vcs_at_option { + let vcs_info = get_vcs_info(&detect_vcs_at).await; + if let Some(active_file) = active_file_path { + info.push_str(&format!( + "\n\nThe active IDE file is:\n{}", + active_file.display() + )); + } else { + info.push_str("\n\nThere is no active file currently open in the IDE."); + } + info.push_str(&vcs_info); + 
} else { + info.push_str("\n\nThere is no active file with version control, complain to user if they want to use anything git/hg/svn/etc and ask to open a file in IDE for you to know which project is active."); + } + info +} + +pub async fn dig_for_project_summarization_file( + gcx: Arc>, +) -> (bool, Option) { + match crate::files_correction::get_active_project_path(gcx.clone()).await { + Some(active_project_path) => { + let summary_path = active_project_path + .join(".refact") + .join("project_summary.yaml"); + if !summary_path.exists() { + (false, Some(summary_path.to_string_lossy().to_string())) + } else { + (true, Some(summary_path.to_string_lossy().to_string())) + } + } + None => { + tracing::info!("No projects found, project summarization is not relevant."); + (false, None) + } + } +} + +async fn _read_project_summary(summary_path: String) -> Option { + match fs::read_to_string(summary_path) { + Ok(content) => { + if let Ok(yaml) = serde_yaml::from_str::(&content) { + if let Some(project_summary) = yaml.get("project_summary") { + match project_summary { + serde_yaml::Value::String(s) => Some(s.clone()), + _ => { + tracing::error!("'project_summary' is not a string in YAML file."); + None + } + } + } else { + tracing::error!("Key 'project_summary' not found in YAML file."); + None + } + } else { + tracing::error!("Failed to parse project summary YAML file."); + None + } + } + Err(e) => { + tracing::error!("Failed to read project summary file: {}", e); + None + } + } +} + +pub async fn system_prompt_add_extra_instructions( + gcx: Arc>, + system_prompt: String, + tool_names: HashSet, + chat_meta: &call_validation::ChatMeta, + task_meta: &Option, +) -> String { + let include_project_info = chat_meta.include_project_info; + + // Load project information config to respect user settings + let config = load_project_information_config(gcx.clone()).await; + // If config is globally disabled, treat as if include_project_info is false + let include_project_info = include_project_info && config.enabled; + + async fn workspace_files_info( + gcx: &Arc>, + ) -> (Vec, Option) { + let gcx_locked = gcx.read().await; + let documents_state = &gcx_locked.documents_state; + let dirs_locked = documents_state.workspace_folders.lock().unwrap(); + let workspace_dirs = dirs_locked + .clone() + .into_iter() + .map(|x| x.to_string_lossy().to_string()) + .collect(); + let active_file_path = documents_state.active_file_path.clone(); + (workspace_dirs, active_file_path) + } + + // Helper to truncate content to max chars + fn truncate_to_chars(s: &str, max_chars: usize) -> String { + if s.chars().count() <= max_chars { + s.to_string() + } else { + let truncated: String = s.chars().take(max_chars).collect(); + format!("{}\n[TRUNCATED]", truncated) + } + } + + let mut system_prompt = system_prompt.clone(); + + // %SYSTEM_INFO% - OS, datetime, username, architecture + // Respects config.sections.system_info.enabled and max_chars + if system_prompt.contains("%SYSTEM_INFO%") { + if include_project_info && config.sections.system_info.enabled { + let system_info = system_context::SystemInfo::gather(); + let mut content = system_info.to_prompt_string(); + if let Some(max_chars) = config.sections.system_info.max_chars { + content = truncate_to_chars(&content, max_chars); + } + system_prompt = system_prompt.replace("%SYSTEM_INFO%", &content); + } else { + system_prompt = system_prompt.replace("%SYSTEM_INFO%", ""); + } + } + + // %ENVIRONMENT_INFO% - Detected environments and usage instructions + // Respects 
config.sections.environment_instructions.enabled and max_chars + if system_prompt.contains("%ENVIRONMENT_INFO%") { + if include_project_info && config.sections.environment_instructions.enabled { + let project_dirs = get_project_dirs(gcx.clone()).await; + let environments = system_context::detect_environments(&project_dirs).await; + let mut env_instructions = system_context::generate_environment_instructions(&environments); + if let Some(max_chars) = config.sections.environment_instructions.max_chars { + env_instructions = truncate_to_chars(&env_instructions, max_chars); + } + system_prompt = system_prompt.replace("%ENVIRONMENT_INFO%", &env_instructions); + } else { + system_prompt = system_prompt.replace("%ENVIRONMENT_INFO%", ""); + } + } + + // %PROJECT_CONFIGS% - Detected project configuration files + // Respects config.sections.project_configs.enabled and max_items + if system_prompt.contains("%PROJECT_CONFIGS%") { + if include_project_info && config.sections.project_configs.enabled { + let project_dirs = get_project_dirs(gcx.clone()).await; + let configs = system_context::find_project_configs(&project_dirs).await; + let max_items = config.sections.project_configs.max_items.unwrap_or(30); + let configs_to_show: Vec<_> = configs.into_iter().take(max_items).collect(); + if !configs_to_show.is_empty() { + let config_list = configs_to_show + .iter() + .map(|c| format!("- {} ({})", c.file_name, c.category)) + .collect::>() + .join("\n"); + let config_section = format!("## Project Configuration Files\n{}", config_list); + system_prompt = system_prompt.replace("%PROJECT_CONFIGS%", &config_section); + } else { + system_prompt = system_prompt.replace("%PROJECT_CONFIGS%", ""); + } + } else { + system_prompt = system_prompt.replace("%PROJECT_CONFIGS%", ""); + } + } + + // %PROJECT_TREE% - Project file tree + // Respects config.sections.project_tree.enabled, max_depth, and max_chars + if system_prompt.contains("%PROJECT_TREE%") { + if include_project_info && config.sections.project_tree.enabled { + let max_depth = config.sections.project_tree.max_depth.unwrap_or(4); + let max_chars = config.sections.project_tree.max_chars.unwrap_or(16000); + match system_context::generate_compact_project_tree(gcx.clone(), max_depth).await { + Ok(tree) if !tree.is_empty() => { + let tree_content = truncate_to_chars(&tree, max_chars); + let tree_section = format!("## Project Structure\n```\n{}```", tree_content); + system_prompt = system_prompt.replace("%PROJECT_TREE%", &tree_section); + } + _ => { + system_prompt = system_prompt.replace("%PROJECT_TREE%", ""); + } + } + } else { + system_prompt = system_prompt.replace("%PROJECT_TREE%", ""); + } + } + + // %GIT_INFO% - Git repository information + // Respects config.sections.git_info.enabled and max_chars + if system_prompt.contains("%GIT_INFO%") { + if include_project_info && config.sections.git_info.enabled { + let project_dirs = get_project_dirs(gcx.clone()).await; + let git_infos = gather_git_info(&project_dirs).await; + let mut git_section = generate_git_info_prompt(&git_infos); + if let Some(max_chars) = config.sections.git_info.max_chars { + git_section = truncate_to_chars(&git_section, max_chars); + } + system_prompt = system_prompt.replace("%GIT_INFO%", &git_section); + } else { + system_prompt = system_prompt.replace("%GIT_INFO%", ""); + } + } + + if system_prompt.contains("%WORKSPACE_INFO%") { + if include_project_info { + let (workspace_dirs, active_file_path) = workspace_files_info(&gcx).await; + let info = _workspace_info(&workspace_dirs, 
&active_file_path).await; + system_prompt = system_prompt.replace("%WORKSPACE_INFO%", &info); + } else { + system_prompt = system_prompt.replace("%WORKSPACE_INFO%", ""); + } + } + + if system_prompt.contains("%AGENT_WORKTREE%") { + let worktree_info = if let Some(tm) = task_meta { + if let Some(ref card_id) = tm.card_id { + match crate::tasks::storage::load_board(gcx.clone(), &tm.task_id).await { + Ok(board) => { + if let Some(card) = board.get_card(card_id) { + if let Some(ref worktree) = card.agent_worktree { + format!("## Your Working Directory\nYou are working in an isolated git worktree at:\n`{}`\n\nAll your file operations should be within this directory. Changes here don't affect the main repository until merged.", worktree) + } else { + String::new() + } + } else { + String::new() + } + } + Err(_) => String::new(), + } + } else { + String::new() + } + } else { + String::new() + }; + system_prompt = system_prompt.replace("%AGENT_WORKTREE%", &worktree_info); + } + + if system_prompt.contains("%KNOWLEDGE_INSTRUCTIONS%") { + system_prompt = system_prompt.replace("%KNOWLEDGE_INSTRUCTIONS%", ""); + } + + if system_prompt.contains("%PROJECT_SUMMARY%") { + if include_project_info { + let (exists, summary_path_option) = + dig_for_project_summarization_file(gcx.clone()).await; + if exists { + if let Some(summary_path) = summary_path_option { + if let Some(project_info) = _read_project_summary(summary_path).await { + system_prompt = system_prompt.replace("%PROJECT_SUMMARY%", &project_info); + } else { + system_prompt = system_prompt.replace("%PROJECT_SUMMARY%", ""); + } + } + } else { + system_prompt = system_prompt.replace("%PROJECT_SUMMARY%", ""); + } + } else { + system_prompt = system_prompt.replace("%PROJECT_SUMMARY%", ""); + } + } + + if system_prompt.contains("%EXPLORE_FILE_EDIT_INSTRUCTIONS%") { + let replacement = + if tool_names.contains("create_textdoc") || tool_names.contains("update_textdoc") { + "- Then use `*_textdoc()` tools to make changes.\n" + } else { + "" + }; + + system_prompt = system_prompt.replace("%EXPLORE_FILE_EDIT_INSTRUCTIONS%", replacement); + } + + if system_prompt.contains("%AGENT_EXPLORATION_INSTRUCTIONS%") { + system_prompt = system_prompt.replace( + "%AGENT_EXPLORATION_INSTRUCTIONS%", + super::prompt_snippets::AGENT_EXPLORATION_INSTRUCTIONS + ); + } + + if system_prompt.contains("%AGENT_EXECUTION_INSTRUCTIONS%") { + let has_edit_tools = + tool_names.contains("create_textdoc") || tool_names.contains("update_textdoc"); + let replacement = if has_edit_tools { + super::prompt_snippets::AGENT_EXECUTION_INSTRUCTIONS + } else { + super::prompt_snippets::AGENT_EXECUTION_INSTRUCTIONS_NO_TOOLS + }; + system_prompt = system_prompt.replace("%AGENT_EXECUTION_INSTRUCTIONS%", replacement); + } + + if system_prompt.contains("%CD_INSTRUCTIONS%") { + system_prompt = system_prompt.replace( + "%CD_INSTRUCTIONS%", + super::prompt_snippets::CD_INSTRUCTIONS + ); + } + + if system_prompt.contains("%SHELL_INSTRUCTIONS%") { + system_prompt = system_prompt.replace( + "%SHELL_INSTRUCTIONS%", + super::prompt_snippets::SHELL_INSTRUCTIONS + ); + } + + system_prompt +} + +pub async fn prepend_the_right_system_prompt_and_maybe_more_initial_messages( + gcx: Arc>, + mut messages: Vec, + chat_meta: &call_validation::ChatMeta, + task_meta: &Option, + stream_back_to_user: &mut HasRagResults, + tool_names: HashSet, + mode_id: &str, + model_id: &str, +) -> Vec { + if messages.is_empty() { + tracing::error!("What's that? 
Messages list is empty"); + return messages; + } + + let have_system = messages + .first() + .map(|m| m.role == "system") + .unwrap_or(false); + let have_project_context = messages + .iter() + .any(|m| m.role == "context_file" && m.tool_call_id == PROJECT_CONTEXT_MARKER); + + let is_inside_container = gcx.read().await.cmdline.inside_container; + if chat_meta.chat_remote && !is_inside_container { + messages = match prepend_system_prompt_and_maybe_more_initial_messages_from_remote( + gcx.clone(), + &messages, + chat_meta, + stream_back_to_user, + ) + .await + { + Ok(messages_from_remote) => messages_from_remote, + Err(e) => { + tracing::error!("prepend_the_right_system_prompt_and_maybe_more_initial_messages_from_remote: {}", e); + messages + } + }; + return messages; + } + + if !have_system { + let canonical_mode = canonical_mode_id(&chat_meta.chat_mode).unwrap_or_else(|_| "agent".to_string()); + match canonical_mode.as_str() { + "configurator" => { + crate::integrations::config_chat::mix_config_messages( + gcx.clone(), + &chat_meta, + &mut messages, + stream_back_to_user, + ) + .await; + } + "project_summary" => { + crate::integrations::project_summary_chat::mix_project_summary_messages( + gcx.clone(), + &chat_meta, + &mut messages, + stream_back_to_user, + ) + .await; + } + _ => { + let base_prompt = get_mode_system_prompt(gcx.clone(), mode_id, Some(model_id)).await; + let system_message_content = system_prompt_add_extra_instructions( + gcx.clone(), + base_prompt, + tool_names, + chat_meta, + task_meta, + ) + .await; + let msg = ChatMessage { + role: "system".to_string(), + content: ChatContent::SimpleText(system_message_content), + ..Default::default() + }; + stream_back_to_user.push_in_json(serde_json::json!(msg)); + messages.insert(0, msg); + } + } + } + + if chat_meta.include_project_info && !have_project_context { + match gather_and_inject_system_context(&gcx, &mut messages, stream_back_to_user).await { + Ok(()) => {} + Err(e) => { + tracing::warn!("Failed to gather system context: {}", e); + } + } + } else if !chat_meta.include_project_info { + tracing::info!("Skipping project/system context injection (include_project_info=false)"); + } + + let canonical_chat_mode = canonical_mode_id(&chat_meta.chat_mode).unwrap_or_else(|_| "agent".to_string()); + if matches!(canonical_chat_mode.as_str(), "task_planner" | "task_agent") { + match inject_task_memories(&gcx, &mut messages, stream_back_to_user, &chat_meta.chat_id) + .await + { + Ok(()) => {} + Err(e) => { + tracing::warn!("Failed to inject task memories: {}", e); + } + } + } + + tracing::info!("\n\nSYSTEM PROMPT MIXER chat_mode={:?}", chat_meta.chat_mode); + messages +} + +const TASK_MEMORIES_CONTEXT_MARKER: &str = "task_memories_context"; +const MAX_TASK_MEMORY_CONTENT_SIZE: usize = 3000; +const MAX_TASK_MEMORIES_TOTAL_SIZE: usize = 80_000; + +async fn gather_and_inject_system_context( + gcx: &Arc>, + messages: &mut Vec, + stream_back_to_user: &mut HasRagResults, +) -> Result<(), String> { + let context = gather_system_context(gcx.clone(), false, 4).await?; + + if !context.instruction_files.is_empty() { + match create_instruction_files_message(&context.instruction_files).await { + Ok(instr_msg) => { + let insert_pos = messages + .iter() + .position(|m| m.role == "user" || m.role == "assistant") + .unwrap_or(messages.len()); + + stream_back_to_user.push_in_json(serde_json::json!(instr_msg)); + messages.insert(insert_pos, instr_msg); + + tracing::info!( + "Injected {} instruction files at position {}: {:?}", + 
context.instruction_files.len(), + insert_pos, + context + .instruction_files + .iter() + .map(|f| &f.file_name) + .collect::>() + ); + } + Err(e) => { + tracing::warn!("Failed to create instruction files message: {}", e); + } + } + } + + if !context.memories.is_empty() { + if let Some(memories_msg) = create_memories_message(&context.memories) { + let insert_pos = messages + .iter() + .position(|m| m.role == "user" || m.role == "assistant") + .unwrap_or(messages.len()); + + stream_back_to_user.push_in_json(serde_json::json!(memories_msg)); + messages.insert(insert_pos, memories_msg); + + tracing::info!( + "Injected {} memories at position {}", + context.memories.len(), + insert_pos + ); + } + } + + if !context.detected_environments.is_empty() { + tracing::info!( + "Detected {} environments: {:?}", + context.detected_environments.len(), + context + .detected_environments + .iter() + .map(|e| &e.env_type) + .collect::>() + ); + } + + Ok(()) +} + +pub async fn inject_task_memories( + gcx: &Arc>, + messages: &mut Vec, + stream_back_to_user: &mut HasRagResults, + chat_id: &str, +) -> Result<(), String> { + let task_id = match infer_task_id_from_chat_id(chat_id) { + Some(id) => id, + None => return Ok(()), + }; + + let memories = load_task_memories(gcx.clone(), &task_id).await?; + if memories.is_empty() { + return Ok(()); + } + + let mut context_files: Vec = Vec::new(); + let mut total_size = 0; + let mut included_count = 0; + let mut skipped_count = 0; + + for (path, content) in &memories { + if total_size >= MAX_TASK_MEMORIES_TOTAL_SIZE { + skipped_count += 1; + continue; + } + + let truncated_content = if content.len() > MAX_TASK_MEMORY_CONTENT_SIZE { + format!( + "{}\n\n[TRUNCATED]", + content + .chars() + .take(MAX_TASK_MEMORY_CONTENT_SIZE) + .collect::() + ) + } else { + content.clone() + }; + + let line_count = truncated_content.lines().count().max(1); + total_size += truncated_content.len(); + included_count += 1; + + context_files.push(ContextFile { + file_name: path.to_string_lossy().to_string(), + file_content: truncated_content, + line1: 1, + line2: line_count, + file_rev: None, + symbols: vec![], + gradient_type: -1, + usefulness: 95.0, + skip_pp: true, + }); + } + + if context_files.is_empty() { + return Ok(()); + } + + if skipped_count > 0 { + context_files.push(ContextFile { + file_name: "(task memories summary)".to_string(), + file_content: format!( + "Note: {} task memories included, {} omitted due to size limits. 
Use task_memories_get() to retrieve all.", + included_count, + skipped_count + ), + line1: 1, + line2: 1, + file_rev: None, + symbols: vec![], + gradient_type: -1, + usefulness: 50.0, + skip_pp: true, + }); + } + + let task_memories_msg = ChatMessage { + role: "context_file".to_string(), + content: ChatContent::ContextFiles(context_files), + tool_call_id: TASK_MEMORIES_CONTEXT_MARKER.to_string(), + ..Default::default() + }; + + let insert_pos = messages + .iter() + .position(|m| m.role == "user" || m.role == "assistant") + .unwrap_or(messages.len()); + + stream_back_to_user.push_in_json(serde_json::json!(task_memories_msg)); + messages.insert(insert_pos, task_memories_msg); + + tracing::info!( + "Injected {} task memories at position {} for task {} ({} skipped)", + included_count, + insert_pos, + task_id, + skipped_count + ); + + Ok(()) +} + +pub async fn prepend_system_prompt_and_maybe_more_initial_messages_from_remote( + gcx: Arc>, + messages: &[call_validation::ChatMessage], + chat_meta: &call_validation::ChatMeta, + stream_back_to_user: &mut HasRagResults, +) -> Result, String> { + let post = PrependSystemPromptPost { + messages: messages.to_vec(), + chat_meta: chat_meta.clone(), + }; + + let port = + docker_container_get_host_lsp_port_to_connect(gcx.clone(), &chat_meta.chat_id).await?; + let url = + format!("http://localhost:{port}/v1/prepend-system-prompt-and-maybe-more-initial-messages"); + let response: PrependSystemPromptResponse = http_post_json(&url, &post).await?; + + for msg in response.messages_to_stream_back { + stream_back_to_user.push_in_json(msg); + } + + Ok(response.messages) +} diff --git a/refact-agent/engine/src/chat/queue.rs b/refact-agent/engine/src/chat/queue.rs new file mode 100644 index 000000000..23d9c2d6c --- /dev/null +++ b/refact-agent/engine/src/chat/queue.rs @@ -0,0 +1,1533 @@ +use std::collections::VecDeque; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use tokio::sync::{Mutex as AMutex, RwLock as ARwLock}; +use tracing::warn; +use uuid::Uuid; + +use crate::call_validation::{ChatContent, ChatMessage}; +use crate::global_context::GlobalContext; + +use super::types::*; +use super::content::parse_content_with_attachments; +use super::generation::{start_generation, prepare_session_preamble_and_knowledge}; +use super::tools::execute_tools_with_session; +use super::trajectories::maybe_save_trajectory; + +fn command_triggers_generation(cmd: &ChatCommand) -> bool { + matches!( + cmd, + ChatCommand::UserMessage { .. } + | ChatCommand::RetryFromIndex { .. 
} + | ChatCommand::Regenerate {} + ) +} + +pub async fn inject_priority_messages_if_any( + gcx: Arc>, + session_arc: Arc>, +) -> bool { + let priority_requests = { + let mut session = session_arc.lock().await; + let requests = drain_priority_user_messages(&mut session.command_queue); + if !requests.is_empty() { + session.emit_queue_update(); + } + requests + }; + + if priority_requests.is_empty() { + return false; + } + + for request in priority_requests { + if let ChatCommand::UserMessage { + content, + attachments, + } = request.command + { + // Extract data needed for checkpoint creation while holding the lock briefly + let (checkpoints_enabled, chat_id, latest_checkpoint) = { + let session = session_arc.lock().await; + ( + session.thread.checkpoints_enabled, + session.chat_id.clone(), + find_latest_checkpoint(&session), + ) + }; + + // Create checkpoint without holding the session lock (can be slow) + let checkpoints = if checkpoints_enabled { + create_checkpoint_async(gcx.clone(), latest_checkpoint.as_ref(), &chat_id).await + } else { + Vec::new() + }; + + // Reacquire lock to add the message + let mut session = session_arc.lock().await; + let parsed_content = parse_content_with_attachments(&content, &attachments); + let user_message = ChatMessage { + message_id: Uuid::new_v4().to_string(), + role: "user".to_string(), + content: parsed_content, + checkpoints, + ..Default::default() + }; + session.add_message(user_message); + } + } + + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + true +} + +pub fn find_allowed_command_while_paused(queue: &VecDeque) -> Option { + for (i, req) in queue.iter().enumerate() { + match &req.command { + ChatCommand::ToolDecision { .. } + | ChatCommand::ToolDecisions { .. } + | ChatCommand::Abort {} => { + return Some(i); + } + _ => {} + } + } + None +} + +pub fn find_allowed_command_while_waiting_ide(queue: &VecDeque) -> Option { + for (i, req) in queue.iter().enumerate() { + match &req.command { + ChatCommand::IdeToolResult { .. } | ChatCommand::Abort {} => { + return Some(i); + } + _ => {} + } + } + None +} + +pub fn drain_priority_user_messages(queue: &mut VecDeque) -> Vec { + let mut priority_messages = Vec::new(); + let mut i = 0; + while i < queue.len() { + if queue[i].priority && matches!(queue[i].command, ChatCommand::UserMessage { .. }) { + if let Some(req) = queue.remove(i) { + priority_messages.push(req); + } + } else { + i += 1; + } + } + priority_messages +} + +pub fn drain_non_priority_user_messages( + queue: &mut VecDeque, +) -> Vec { + let mut messages = Vec::new(); + let mut i = 0; + while i < queue.len() { + if !queue[i].priority && matches!(queue[i].command, ChatCommand::UserMessage { .. }) { + if let Some(req) = queue.remove(i) { + messages.push(req); + } + } else { + i += 1; + } + } + messages +} + +pub fn apply_setparams_patch( + thread: &mut ThreadParams, + patch: &serde_json::Value, +) -> (bool, serde_json::Value) { + let mut changed = false; + + if let Some(model) = patch.get("model").and_then(|v| v.as_str()) { + if thread.model != model { + thread.model = model.to_string(); + // Clear provider-specific state that's invalid across models. + // OpenAI Responses API previous_response_id is tied to a specific + // model+endpoint; switching models makes it invalid. 
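+            // (Hypothetical example: if the thread previously used an OpenAI Responses
+            // model and the patch switches it to, say, an Anthropic model, a stored
+            // previous_response_id would point at a response the new backend cannot
+            // resolve, which is why it is dropped below.)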
+ if thread.previous_response_id.is_some() { + tracing::info!("Clearing previous_response_id on model switch"); + thread.previous_response_id = None; + } + changed = true; + } + } + if let Some(mode) = patch.get("mode").and_then(|v| v.as_str()) { + if thread.mode != mode { + thread.mode = mode.to_string(); + changed = true; + } + } + if let Some(boost) = patch.get("boost_reasoning").and_then(|v| v.as_bool()) { + if thread.boost_reasoning != Some(boost) { + thread.boost_reasoning = Some(boost); + changed = true; + } + } + if let Some(effort_val) = patch.get("reasoning_effort") { + let new_val = if effort_val.is_null() { + None + } else if let Some(effort) = effort_val.as_str() { + if effort.is_empty() { None } else { Some(effort.to_string()) } + } else { + thread.reasoning_effort.clone() + }; + if thread.reasoning_effort != new_val { + thread.reasoning_effort = new_val; + changed = true; + } + } + if let Some(budget_val) = patch.get("thinking_budget") { + if budget_val.is_null() { + if thread.thinking_budget.is_some() { + thread.thinking_budget = None; + changed = true; + } + } else if let Some(b) = budget_val.as_u64() { + let new_val = Some(b as usize); + if thread.thinking_budget != new_val { + thread.thinking_budget = new_val; + changed = true; + } + } + } + if let Some(temp_val) = patch.get("temperature") { + if temp_val.is_null() { + if thread.temperature.is_some() { + thread.temperature = None; + changed = true; + } + } else if let Some(t) = temp_val.as_f64() { + let new_val = Some((t as f32).clamp(0.0, 2.0)); + if thread.temperature != new_val { + thread.temperature = new_val; + changed = true; + } + } + // Invalid type (not null, not number) - ignore, keep current value + } + if let Some(freq_val) = patch.get("frequency_penalty") { + if freq_val.is_null() { + if thread.frequency_penalty.is_some() { + thread.frequency_penalty = None; + changed = true; + } + } else if let Some(f) = freq_val.as_f64() { + let new_val = Some((f as f32).clamp(-2.0, 2.0)); + if thread.frequency_penalty != new_val { + thread.frequency_penalty = new_val; + changed = true; + } + } + // Invalid type - ignore + } + if let Some(max_val) = patch.get("max_tokens") { + if max_val.is_null() { + if thread.max_tokens.is_some() { + thread.max_tokens = None; + changed = true; + } + } else if let Some(m) = max_val.as_u64() { + let new_val = Some((m as usize).min(1_000_000)); + if thread.max_tokens != new_val { + thread.max_tokens = new_val; + changed = true; + } + } + // Invalid type - ignore + } + if let Some(parallel_val) = patch.get("parallel_tool_calls") { + if parallel_val.is_null() { + if thread.parallel_tool_calls.is_some() { + thread.parallel_tool_calls = None; + changed = true; + } + } else if let Some(p) = parallel_val.as_bool() { + let new_val = Some(p); + if thread.parallel_tool_calls != new_val { + thread.parallel_tool_calls = new_val; + changed = true; + } + } + // Invalid type - ignore + } + if let Some(tool_use) = patch.get("tool_use").and_then(|v| v.as_str()) { + if thread.tool_use != tool_use { + thread.tool_use = tool_use.to_string(); + changed = true; + } + } + if let Some(cap) = patch.get("context_tokens_cap") { + if cap.is_null() { + if thread.context_tokens_cap.is_some() { + thread.context_tokens_cap = None; + changed = true; + } + } else if let Some(n) = cap.as_u64() { + let new_cap = Some(n as usize); + if thread.context_tokens_cap != new_cap { + thread.context_tokens_cap = new_cap; + changed = true; + } + } + // Invalid type (not null, not number) - ignore, keep current value + } + if let 
Some(include) = patch.get("include_project_info").and_then(|v| v.as_bool()) { + if thread.include_project_info != include { + thread.include_project_info = include; + changed = true; + } + } + if let Some(enabled) = patch.get("checkpoints_enabled").and_then(|v| v.as_bool()) { + if thread.checkpoints_enabled != enabled { + thread.checkpoints_enabled = enabled; + changed = true; + } + } + if let Some(val) = patch.get("auto_approve_editing_tools").and_then(|v| v.as_bool()) { + if thread.auto_approve_editing_tools != val { + thread.auto_approve_editing_tools = val; + changed = true; + } + } + if let Some(val) = patch.get("auto_approve_dangerous_commands").and_then(|v| v.as_bool()) { + if thread.auto_approve_dangerous_commands != val { + thread.auto_approve_dangerous_commands = val; + changed = true; + } + } + if let Some(task_meta_value) = patch.get("task_meta") { + if !task_meta_value.is_null() { + if let Ok(task_meta) = + serde_json::from_value::(task_meta_value.clone()) + { + thread.task_meta = Some(task_meta); + changed = true; + } + } + } + if let Some(parent_id) = patch.get("parent_id").and_then(|v| v.as_str()) { + let new_val = if parent_id.is_empty() { None } else { Some(parent_id.to_string()) }; + if thread.parent_id != new_val { + thread.parent_id = new_val; + changed = true; + } + } + if let Some(link_type) = patch.get("link_type").and_then(|v| v.as_str()) { + let new_val = if link_type.is_empty() { None } else { Some(link_type.to_string()) }; + if thread.link_type != new_val { + thread.link_type = new_val; + changed = true; + } + } + if let Some(root_chat_id) = patch.get("root_chat_id").and_then(|v| v.as_str()) { + let new_val = if root_chat_id.is_empty() { None } else { Some(root_chat_id.to_string()) }; + if thread.root_chat_id != new_val { + thread.root_chat_id = new_val; + changed = true; + } + } + + let mut sanitized_patch = patch.clone(); + if let Some(obj) = sanitized_patch.as_object_mut() { + obj.remove("type"); + obj.remove("chat_id"); + obj.remove("seq"); + } + + (changed, sanitized_patch) +} + +pub async fn process_command_queue( + gcx: Arc>, + session_arc: Arc>, + processor_running: Arc, +) { + struct ProcessorGuard(Arc); + impl Drop for ProcessorGuard { + fn drop(&mut self) { + self.0.store(false, Ordering::SeqCst); + } + } + let _guard = ProcessorGuard(processor_running); + + loop { + let command = { + let mut session = session_arc.lock().await; + + if session.closed { + return; + } + + let state = session.runtime.state; + let is_busy = + state == SessionState::Generating || state == SessionState::ExecutingTools; + + let notify = session.queue_notify.clone(); + let waiter = notify.notified(); + + if is_busy { + drop(session); + waiter.await; + continue; + } + + if state == SessionState::WaitingIde { + if let Some(idx) = find_allowed_command_while_waiting_ide(&session.command_queue) { + let cmd = session.command_queue.remove(idx); + session.emit_queue_update(); + cmd + } else { + drop(session); + waiter.await; + continue; + } + } else if state == SessionState::Paused { + if let Some(idx) = find_allowed_command_while_paused(&session.command_queue) { + let cmd = session.command_queue.remove(idx); + session.emit_queue_update(); + cmd + } else { + drop(session); + waiter.await; + continue; + } + } else if session.command_queue.is_empty() { + let closed = session.closed; + drop(session); + + if closed { + return; + } + + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + + let session = session_arc.lock().await; + if session.closed { + return; + } + if 
session.command_queue.is_empty() { + let waiter2 = notify.notified(); + drop(session); + waiter2.await; + continue; + } + drop(session); + continue; + } else { + let cmd = session.command_queue.pop_front(); + if let Some(ref req) = cmd { + if command_triggers_generation(&req.command) { + session.runtime.state = SessionState::Generating; + } + } + session.emit_queue_update(); + cmd + } + }; + + let Some(request) = command else { + continue; + }; + + match request.command { + ChatCommand::UserMessage { + content, + attachments, + } => { + let additional_messages = if !request.priority { + let mut session = session_arc.lock().await; + let msgs = drain_non_priority_user_messages(&mut session.command_queue); + if !msgs.is_empty() { + session.emit_queue_update(); + } + msgs + } else { + Vec::new() + }; + + // Extract data needed for checkpoint creation while holding the lock briefly + let (checkpoints_enabled, chat_id, latest_checkpoint) = { + let session = session_arc.lock().await; + ( + session.thread.checkpoints_enabled, + session.chat_id.clone(), + find_latest_checkpoint(&session), + ) + }; + + // Create checkpoint without holding the session lock (can be slow) + let checkpoints = if checkpoints_enabled { + create_checkpoint_async(gcx.clone(), latest_checkpoint.as_ref(), &chat_id).await + } else { + Vec::new() + }; + + // Reacquire lock to add messages + { + let mut session = session_arc.lock().await; + let parsed_content = parse_content_with_attachments(&content, &attachments); + let user_message = ChatMessage { + message_id: Uuid::new_v4().to_string(), + role: "user".to_string(), + content: parsed_content, + checkpoints, + ..Default::default() + }; + session.add_message(user_message); + + for additional in additional_messages { + if let ChatCommand::UserMessage { + content: add_content, + attachments: add_attachments, + } = additional.command + { + let add_parsed = + parse_content_with_attachments(&add_content, &add_attachments); + let add_message = ChatMessage { + message_id: Uuid::new_v4().to_string(), + role: "user".to_string(), + content: add_parsed, + ..Default::default() + }; + session.add_message(add_message); + } + } + } + + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + prepare_session_preamble_and_knowledge(gcx.clone(), session_arc.clone()).await; + start_generation(gcx.clone(), session_arc.clone()).await; + } + ChatCommand::RetryFromIndex { + index, + content, + attachments, + } => { + let mut session = session_arc.lock().await; + session.truncate_messages(index); + let parsed_content = parse_content_with_attachments(&content, &attachments); + let user_message = ChatMessage { + message_id: Uuid::new_v4().to_string(), + role: "user".to_string(), + content: parsed_content, + ..Default::default() + }; + session.add_message(user_message); + drop(session); + + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + prepare_session_preamble_and_knowledge(gcx.clone(), session_arc.clone()).await; + start_generation(gcx.clone(), session_arc.clone()).await; + } + ChatCommand::SetParams { patch } => { + if !patch.is_object() { + warn!("SetParams patch must be an object, ignoring"); + continue; + } + let mut session = session_arc.lock().await; + let (mut changed, sanitized_patch) = + apply_setparams_patch(&mut session.thread, &patch); + + let title_in_patch = patch.get("title").and_then(|v| v.as_str()); + let is_gen_in_patch = patch.get("is_title_generated").and_then(|v| v.as_bool()); + if let Some(title) = title_in_patch { + let is_generated = 
is_gen_in_patch.unwrap_or(false); + session.set_title(title.to_string(), is_generated); + } else if let Some(is_gen) = is_gen_in_patch { + if session.thread.is_title_generated != is_gen { + let title = session.thread.title.clone(); + session.set_title(title, is_gen); + changed = true; + } + } + let mut patch_for_chat_sse = sanitized_patch; + if let Some(obj) = patch_for_chat_sse.as_object_mut() { + obj.remove("title"); + obj.remove("is_title_generated"); + } + session.emit(ChatEvent::ThreadUpdated { + params: patch_for_chat_sse, + }); + if changed { + session.increment_version(); + session.touch(); + } + } + ChatCommand::Abort {} => { + let mut session = session_arc.lock().await; + session.abort_stream(); + } + ChatCommand::ToolDecision { + tool_call_id, + accepted, + } => { + let decisions = vec![ToolDecisionItem { + tool_call_id: tool_call_id.clone(), + accepted, + }]; + handle_tool_decisions(gcx.clone(), session_arc.clone(), &decisions).await; + } + ChatCommand::ToolDecisions { decisions } => { + handle_tool_decisions(gcx.clone(), session_arc.clone(), &decisions).await; + } + ChatCommand::IdeToolResult { + tool_call_id, + content, + tool_failed, + } => { + let mut session = session_arc.lock().await; + let tool_message = ChatMessage { + message_id: Uuid::new_v4().to_string(), + role: "tool".to_string(), + content: ChatContent::SimpleText(content), + tool_call_id, + tool_failed: Some(tool_failed), + ..Default::default() + }; + session.add_message(tool_message); + session.set_runtime_state(SessionState::Idle, None); + drop(session); + start_generation(gcx.clone(), session_arc.clone()).await; + } + ChatCommand::UpdateMessage { + message_id, + content, + attachments, + regenerate, + } => { + let mut session = session_arc.lock().await; + if session.runtime.state == SessionState::Generating { + session.abort_stream(); + } + let parsed_content = parse_content_with_attachments(&content, &attachments); + if let Some(idx) = session + .messages + .iter() + .position(|m| m.message_id == message_id) + { + let mut updated_msg = session.messages[idx].clone(); + updated_msg.content = parsed_content; + session.update_message(&message_id, updated_msg); + if regenerate && idx + 1 < session.messages.len() { + session.truncate_messages(idx + 1); + drop(session); + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + prepare_session_preamble_and_knowledge(gcx.clone(), session_arc.clone()).await; + start_generation(gcx.clone(), session_arc.clone()).await; + } + } + } + ChatCommand::RemoveMessage { + message_id, + regenerate, + } => { + let mut session = session_arc.lock().await; + if session.runtime.state == SessionState::Generating { + session.abort_stream(); + } + if let Some(idx) = session.remove_message(&message_id) { + if regenerate && idx < session.messages.len() { + session.truncate_messages(idx); + drop(session); + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + prepare_session_preamble_and_knowledge(gcx.clone(), session_arc.clone()).await; + start_generation(gcx.clone(), session_arc.clone()).await; + } + } + } + ChatCommand::Regenerate {} => { + prepare_session_preamble_and_knowledge(gcx.clone(), session_arc.clone()).await; + start_generation(gcx.clone(), session_arc.clone()).await; + } + ChatCommand::RestoreMessages { messages } => { + let mut session = session_arc.lock().await; + for msg_value in messages { + if let Ok(msg) = serde_json::from_value::(msg_value) { + if !is_allowed_role_for_restore(&msg.role) { + continue; + } + let sanitized = 
sanitize_message_for_restore(&msg); + session.add_message(sanitized); + } + } + drop(session); + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + } + ChatCommand::BranchFromChat { source_chat_id, up_to_message_id } => { + if let Err(e) = super::trajectories::validate_trajectory_id(&source_chat_id) { + warn!("BranchFromChat: invalid source_chat_id: {}", e); + continue; + } + + let sessions = { + let gcx_locked = gcx.read().await; + gcx_locked.chat_sessions.clone() + }; + + let source_session_arc = super::session::get_or_create_session_with_trajectory( + gcx.clone(), + &sessions, + &source_chat_id, + ).await; + + let (messages_to_copy, root_id) = { + let source_session = source_session_arc.lock().await; + let mut msgs = Vec::new(); + let mut found = false; + for m in &source_session.messages { + if is_allowed_role_for_restore(&m.role) { + msgs.push(sanitize_message_for_restore(m)); + } + if m.message_id == up_to_message_id { + found = true; + break; + } + } + if !found { + warn!("BranchFromChat: up_to_message_id '{}' not found in source chat", up_to_message_id); + continue; + } + let root = source_session.thread.root_chat_id.clone() + .unwrap_or_else(|| source_chat_id.clone()); + (msgs, root) + }; + + let mut session = session_arc.lock().await; + session.thread.parent_id = Some(source_chat_id.clone()); + session.thread.link_type = Some("branch".to_string()); + session.thread.root_chat_id = Some(root_id); + + for msg in messages_to_copy { + session.add_message(msg); + } + drop(session); + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + } + } + } +} + +fn is_allowed_role_for_restore(role: &str) -> bool { + matches!(role, "user" | "assistant" | "system" | "tool") +} + +/// Sanitize message for branching - preserves conversation structure but strips: +/// - tool_calls from assistant messages (security: prevents prerun of injected tool calls) +/// - transient metadata (usage, checkpoints, etc.) 
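+/// (Illustrative sketch of the contract: `sanitize_message_for_restore(&msg)` returns a copy
+/// with a fresh message_id; tool_calls, usage, checkpoints, finish_reason and extra are
+/// cleared, while role, content, tool_call_id/tool_failed and reasoning/citation data are kept.)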
+fn sanitize_message_for_restore(msg: &ChatMessage) -> ChatMessage { + ChatMessage { + message_id: Uuid::new_v4().to_string(), + role: msg.role.clone(), + content: msg.content.clone(), + tool_calls: None, // Security: strip tool_calls to prevent prerun of restored messages + tool_call_id: msg.tool_call_id.clone(), // Preserve for tool result messages + tool_failed: msg.tool_failed, // Preserve tool execution status + usage: None, // Strip metering data + checkpoints: vec![], // Strip checkpoint data + reasoning_content: msg.reasoning_content.clone(), + thinking_blocks: msg.thinking_blocks.clone(), + citations: msg.citations.clone(), // Preserve citations (e.g., from web_search) + server_content_blocks: msg.server_content_blocks.clone(), // Preserve for multi-turn web_search + finish_reason: None, // Strip finish reason + extra: serde_json::Map::new(), // Strip extra provider-specific data + output_filter: None, + } +} + +async fn handle_tool_decisions( + gcx: Arc>, + session_arc: Arc>, + decisions: &[ToolDecisionItem], +) { + let is_cache_guard_pause = { + let session = session_arc.lock().await; + session + .runtime + .pause_reasons + .iter() + .any(crate::chat::cache_guard::is_cache_guard_pause_reason) + }; + + if is_cache_guard_pause { + let accepted_any = decisions.iter().any(|d| d.accepted); + + { + let mut session = session_arc.lock().await; + if accepted_any { + session.cache_guard_force_next = true; + } + session.runtime.pause_reasons.clear(); + session.runtime.accepted_tool_ids.clear(); + session.runtime.auto_approved_tool_ids.clear(); + session.runtime.paused_message_index = None; + session.set_runtime_state(SessionState::Idle, None); + } + + if accepted_any { + start_generation(gcx.clone(), session_arc.clone()).await; + } else { + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + } + return; + } + + let (auto_approved_ids, has_remaining_pauses, tool_calls_to_execute, messages, thread, any_rejected) = { + let mut session = session_arc.lock().await; + let auto_approved = session.runtime.auto_approved_tool_ids.clone(); + let paused_msg_idx = session.runtime.paused_message_index; + let accepted = session.process_tool_decisions(decisions); + let any_rejected = decisions.iter().any(|d| !d.accepted); + + for id in &accepted { + if !session.runtime.accepted_tool_ids.contains(id) { + session.runtime.accepted_tool_ids.push(id.clone()); + } + } + + for decision in decisions { + if !decision.accepted { + let tool_message = ChatMessage { + message_id: Uuid::new_v4().to_string(), + role: "tool".to_string(), + content: ChatContent::SimpleText("Tool execution denied by user".to_string()), + tool_call_id: decision.tool_call_id.clone(), + tool_failed: Some(true), + ..Default::default() + }; + session.add_message(tool_message); + } + } + + let remaining = !session.runtime.pause_reasons.is_empty(); + + let mut ids_to_execute: std::collections::HashSet = session.runtime.accepted_tool_ids.iter().cloned().collect(); + if !any_rejected && !remaining { + for id in &auto_approved { + ids_to_execute.insert(id.clone()); + } + } + + let tool_calls: Vec = if let Some(msg_idx) = paused_msg_idx { + session.messages.get(msg_idx) + .and_then(|m| m.tool_calls.as_ref()) + .map(|tcs| tcs.iter().filter(|tc| ids_to_execute.contains(&tc.id)).cloned().collect()) + .unwrap_or_default() + } else { + session.messages + .iter() + .filter_map(|m| m.tool_calls.as_ref()) + .flatten() + .filter(|tc| ids_to_execute.contains(&tc.id)) + .cloned() + .collect() + }; + + ( + auto_approved, + remaining, + tool_calls, + 
session.messages.clone(), + session.thread.clone(), + any_rejected, + ) + }; + + if has_remaining_pauses { + return; + } + + { + let mut session = session_arc.lock().await; + session.runtime.accepted_tool_ids.clear(); + session.runtime.auto_approved_tool_ids.clear(); + session.runtime.paused_message_index = None; + } + + if any_rejected && !auto_approved_ids.is_empty() { + let mut session = session_arc.lock().await; + for id in &auto_approved_ids { + let already_handled = session.messages.iter().any(|m| m.role == "tool" && m.tool_call_id == *id); + if already_handled { + continue; + } + let tool_message = ChatMessage { + message_id: Uuid::new_v4().to_string(), + role: "tool".to_string(), + content: ChatContent::SimpleText("Tool execution skipped due to user rejection of related tools".to_string()), + tool_call_id: id.clone(), + tool_failed: Some(true), + ..Default::default() + }; + session.add_message(tool_message); + } + } + + if !tool_calls_to_execute.is_empty() { + { + let mut session = session_arc.lock().await; + session.set_runtime_state(SessionState::ExecutingTools, None); + } + + let (tool_results, _) = execute_tools_with_session( + gcx.clone(), + session_arc.clone(), + &tool_calls_to_execute, + &messages, + &thread, + &thread.mode, + Some(&thread.model), + super::tools::ExecuteToolsOptions::default(), + ) + .await; + + // Determine tool-requested final state before checking abort. + // Some tools (ask_questions/task_done) set abort_flag=true as part of + // normal operation to stop further LLM generation. + let mut final_state = SessionState::Idle; + for tool_call in &tool_calls_to_execute { + match tool_call.function.name.as_str() { + "ask_questions" => final_state = SessionState::WaitingUserInput, + "task_done" => final_state = SessionState::Completed, + _ => {} + } + } + let tool_initiated_stop = matches!( + final_state, + SessionState::Completed | SessionState::WaitingUserInput + ); + + // Check if we were aborted during tool execution + let was_aborted = { + let session = session_arc.lock().await; + session.abort_flag.load(std::sync::atomic::Ordering::Relaxed) + }; + + { + let mut session = session_arc.lock().await; + for result_msg in tool_results { + session.add_message(result_msg); + } + if tool_initiated_stop { + session.set_runtime_state(final_state, None); + } else { + // Always transition to Idle — either normally or after user abort. + // abort_stream() may have already set Idle, but set_runtime_state + // is idempotent and ensures the UI gets the RuntimeUpdated event. 
+ session.set_runtime_state(SessionState::Idle, None); + } + } + + maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + + if was_aborted || tool_initiated_stop { + return; + } + } + + if any_rejected { + { + let mut session = session_arc.lock().await; + session.set_runtime_state(SessionState::Idle, None); + } + maybe_save_trajectory(gcx, session_arc).await; + } else if !tool_calls_to_execute.is_empty() { + start_generation(gcx, session_arc).await; + } else { + { + let mut session = session_arc.lock().await; + session.set_runtime_state(SessionState::Idle, None); + } + maybe_save_trajectory(gcx, session_arc).await; + } +} + +/// Extract the latest checkpoint from session messages (call while holding lock) +fn find_latest_checkpoint(session: &ChatSession) -> Option { + session + .messages + .iter() + .rev() + .find(|msg| msg.role == "user" && !msg.checkpoints.is_empty()) + .and_then(|msg| msg.checkpoints.first().cloned()) +} + +/// Create checkpoint without holding session lock (async, potentially slow) +async fn create_checkpoint_async( + gcx: Arc>, + latest_checkpoint: Option<&crate::git::checkpoints::Checkpoint>, + chat_id: &str, +) -> Vec { + use crate::git::checkpoints::create_workspace_checkpoint; + + match create_workspace_checkpoint(gcx, latest_checkpoint, chat_id).await { + Ok((checkpoint, _)) => { + tracing::info!( + "Checkpoint created for chat {}: {:?}", + chat_id, + checkpoint + ); + vec![checkpoint] + } + Err(e) => { + warn!( + "Failed to create checkpoint for chat {}: {}", + chat_id, e + ); + Vec::new() + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + fn make_request(cmd: ChatCommand) -> CommandRequest { + CommandRequest { + client_request_id: "req-1".into(), + priority: false, + command: cmd, + } + } + + #[test] + fn test_find_allowed_command_empty_queue() { + let queue = VecDeque::new(); + assert!(find_allowed_command_while_paused(&queue).is_none()); + } + + #[test] + fn test_find_allowed_command_no_allowed() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::UserMessage { + content: json!("hi"), + attachments: vec![], + })); + queue.push_back(make_request(ChatCommand::SetParams { + patch: json!({"model": "gpt-4"}), + })); + assert!(find_allowed_command_while_paused(&queue).is_none()); + } + + #[test] + fn test_find_allowed_command_finds_tool_decision() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::UserMessage { + content: json!("hi"), + attachments: vec![], + })); + queue.push_back(make_request(ChatCommand::ToolDecision { + tool_call_id: "tc1".into(), + accepted: true, + })); + assert_eq!(find_allowed_command_while_paused(&queue), Some(1)); + } + + #[test] + fn test_find_allowed_command_finds_tool_decisions() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::ToolDecisions { + decisions: vec![ToolDecisionItem { + tool_call_id: "tc1".into(), + accepted: true, + }], + })); + assert_eq!(find_allowed_command_while_paused(&queue), Some(0)); + } + + #[test] + fn test_find_allowed_command_finds_abort() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::UserMessage { + content: json!("hi"), + attachments: vec![], + })); + queue.push_back(make_request(ChatCommand::UserMessage { + content: json!("another"), + attachments: vec![], + })); + queue.push_back(make_request(ChatCommand::Abort {})); + assert_eq!(find_allowed_command_while_paused(&queue), Some(2)); + } + + #[test] + fn 
test_find_allowed_command_returns_first_match() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::Abort {})); + queue.push_back(make_request(ChatCommand::ToolDecision { + tool_call_id: "tc1".into(), + accepted: true, + })); + assert_eq!(find_allowed_command_while_paused(&queue), Some(0)); + } + + #[test] + fn test_apply_setparams_model() { + let mut thread = ThreadParams::default(); + thread.model = "old-model".into(); + let patch = json!({"model": "new-model"}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert_eq!(thread.model, "new-model"); + } + + #[test] + fn test_apply_setparams_no_change_same_value() { + let mut thread = ThreadParams::default(); + thread.model = "gpt-4".into(); + let patch = json!({"model": "gpt-4"}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(!changed); + } + + #[test] + fn test_apply_setparams_mode() { + let mut thread = ThreadParams::default(); + let patch = json!({"mode": "NO_TOOLS"}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert_eq!(thread.mode, "NO_TOOLS"); + } + + #[test] + fn test_apply_setparams_boost_reasoning() { + let mut thread = ThreadParams::default(); + let patch = json!({"boost_reasoning": true}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert_eq!(thread.boost_reasoning, Some(true)); + } + + #[test] + fn test_apply_setparams_tool_use() { + let mut thread = ThreadParams::default(); + let patch = json!({"tool_use": "disabled"}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert_eq!(thread.tool_use, "disabled"); + } + + #[test] + fn test_apply_setparams_context_tokens_cap() { + let mut thread = ThreadParams::default(); + let patch = json!({"context_tokens_cap": 4096}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert_eq!(thread.context_tokens_cap, Some(4096)); + } + + #[test] + fn test_apply_setparams_context_tokens_cap_null() { + let mut thread = ThreadParams::default(); + thread.context_tokens_cap = Some(4096); + let patch = json!({"context_tokens_cap": null}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert!(thread.context_tokens_cap.is_none()); + } + + #[test] + fn test_apply_setparams_context_tokens_cap_invalid_type_ignored() { + let mut thread = ThreadParams::default(); + thread.context_tokens_cap = Some(4096); + let patch = json!({"context_tokens_cap": "invalid"}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(!changed); + assert_eq!(thread.context_tokens_cap, Some(4096)); // Value preserved + } + + #[test] + fn test_apply_setparams_include_project_info() { + let mut thread = ThreadParams::default(); + let patch = json!({"include_project_info": false}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert!(!thread.include_project_info); + } + + #[test] + fn test_apply_setparams_checkpoints_enabled() { + let mut thread = ThreadParams::default(); + let patch = json!({"checkpoints_enabled": false}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert!(!thread.checkpoints_enabled); + } + + #[test] + fn test_apply_setparams_multiple_fields() { + let mut thread = ThreadParams::default(); + let patch = json!({ + "model": "claude-3", + "mode": "EXPLORE", + "boost_reasoning": true, + }); + let (changed, _) = 
apply_setparams_patch(&mut thread, &patch); + assert!(changed); + assert_eq!(thread.model, "claude-3"); + assert_eq!(thread.mode, "EXPLORE"); + assert_eq!(thread.boost_reasoning, Some(true)); + } + + #[test] + fn test_apply_setparams_sanitizes_patch() { + let mut thread = ThreadParams::default(); + let patch = json!({ + "model": "gpt-4", + "type": "set_params", + "chat_id": "chat-123", + "seq": "42" + }); + let (_, sanitized) = apply_setparams_patch(&mut thread, &patch); + assert!(sanitized.get("type").is_none()); + assert!(sanitized.get("chat_id").is_none()); + assert!(sanitized.get("seq").is_none()); + assert!(sanitized.get("model").is_some()); + } + + #[test] + fn test_apply_setparams_empty_patch() { + let mut thread = ThreadParams::default(); + let original_model = thread.model.clone(); + let patch = json!({}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(!changed); + assert_eq!(thread.model, original_model); + } + + #[test] + fn test_apply_setparams_invalid_types_ignored() { + let mut thread = ThreadParams::default(); + thread.model = "original".into(); + let patch = json!({ + "model": 123, + "boost_reasoning": "not_a_bool", + }); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + assert!(!changed); + assert_eq!(thread.model, "original"); + } + + #[test] + fn test_find_allowed_command_while_waiting_ide_empty_queue() { + let queue = VecDeque::new(); + assert!(find_allowed_command_while_waiting_ide(&queue).is_none()); + } + + #[test] + fn test_find_allowed_command_while_waiting_ide_no_allowed() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::UserMessage { + content: json!("hi"), + attachments: vec![], + })); + queue.push_back(make_request(ChatCommand::ToolDecision { + tool_call_id: "tc1".into(), + accepted: true, + })); + assert!(find_allowed_command_while_waiting_ide(&queue).is_none()); + } + + #[test] + fn test_find_allowed_command_while_waiting_ide_finds_ide_tool_result() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::UserMessage { + content: json!("hi"), + attachments: vec![], + })); + queue.push_back(make_request(ChatCommand::IdeToolResult { + tool_call_id: "tc1".into(), + content: "result".into(), + tool_failed: false, + })); + assert_eq!(find_allowed_command_while_waiting_ide(&queue), Some(1)); + } + + #[test] + fn test_find_allowed_command_while_waiting_ide_finds_abort() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::UserMessage { + content: json!("hi"), + attachments: vec![], + })); + queue.push_back(make_request(ChatCommand::Abort {})); + assert_eq!(find_allowed_command_while_waiting_ide(&queue), Some(1)); + } + + #[test] + fn test_find_allowed_command_while_waiting_ide_returns_first_match() { + let mut queue = VecDeque::new(); + queue.push_back(make_request(ChatCommand::Abort {})); + queue.push_back(make_request(ChatCommand::IdeToolResult { + tool_call_id: "tc1".into(), + content: "result".into(), + tool_failed: false, + })); + assert_eq!(find_allowed_command_while_waiting_ide(&queue), Some(0)); + } + + #[test] + fn test_priority_insertion_before_non_priority() { + let mut queue = VecDeque::new(); + queue.push_back(CommandRequest { + client_request_id: "req-1".into(), + priority: false, + command: ChatCommand::UserMessage { + content: json!("first"), + attachments: vec![], + }, + }); + queue.push_back(CommandRequest { + client_request_id: "req-2".into(), + priority: false, + command: ChatCommand::UserMessage { + content: 
json!("second"), + attachments: vec![], + }, + }); + let priority_req = CommandRequest { + client_request_id: "req-priority".into(), + priority: true, + command: ChatCommand::UserMessage { + content: json!("priority"), + attachments: vec![], + }, + }; + let insert_pos = queue + .iter() + .position(|r| !r.priority) + .unwrap_or(queue.len()); + queue.insert(insert_pos, priority_req); + assert_eq!(queue[0].client_request_id, "req-priority"); + assert_eq!(queue[1].client_request_id, "req-1"); + assert_eq!(queue[2].client_request_id, "req-2"); + } + + #[test] + fn test_priority_insertion_after_existing_priority() { + let mut queue = VecDeque::new(); + queue.push_back(CommandRequest { + client_request_id: "req-p1".into(), + priority: true, + command: ChatCommand::UserMessage { + content: json!("p1"), + attachments: vec![], + }, + }); + queue.push_back(CommandRequest { + client_request_id: "req-1".into(), + priority: false, + command: ChatCommand::UserMessage { + content: json!("normal"), + attachments: vec![], + }, + }); + let priority_req = CommandRequest { + client_request_id: "req-p2".into(), + priority: true, + command: ChatCommand::UserMessage { + content: json!("p2"), + attachments: vec![], + }, + }; + let insert_pos = queue + .iter() + .position(|r| !r.priority) + .unwrap_or(queue.len()); + queue.insert(insert_pos, priority_req); + assert_eq!(queue[0].client_request_id, "req-p1"); + assert_eq!(queue[1].client_request_id, "req-p2"); + assert_eq!(queue[2].client_request_id, "req-1"); + } + + #[test] + fn test_priority_insertion_into_empty_queue() { + let mut queue: VecDeque = VecDeque::new(); + let priority_req = CommandRequest { + client_request_id: "req-p".into(), + priority: true, + command: ChatCommand::Abort {}, + }; + let insert_pos = queue + .iter() + .position(|r| !r.priority) + .unwrap_or(queue.len()); + queue.insert(insert_pos, priority_req); + assert_eq!(queue.len(), 1); + assert_eq!(queue[0].client_request_id, "req-p"); + } + + #[test] + fn test_priority_insertion_all_priority() { + let mut queue = VecDeque::new(); + queue.push_back(CommandRequest { + client_request_id: "req-p1".into(), + priority: true, + command: ChatCommand::Abort {}, + }); + let priority_req = CommandRequest { + client_request_id: "req-p2".into(), + priority: true, + command: ChatCommand::Abort {}, + }; + let insert_pos = queue + .iter() + .position(|r| !r.priority) + .unwrap_or(queue.len()); + queue.insert(insert_pos, priority_req); + assert_eq!(queue[0].client_request_id, "req-p1"); + assert_eq!(queue[1].client_request_id, "req-p2"); + } + + #[test] + fn test_drain_priority_user_messages_extracts_only_priority() { + let mut queue = VecDeque::new(); + queue.push_back(CommandRequest { + client_request_id: "req-p1".into(), + priority: true, + command: ChatCommand::UserMessage { + content: json!("priority 1"), + attachments: vec![], + }, + }); + queue.push_back(CommandRequest { + client_request_id: "req-1".into(), + priority: false, + command: ChatCommand::UserMessage { + content: json!("normal"), + attachments: vec![], + }, + }); + queue.push_back(CommandRequest { + client_request_id: "req-p2".into(), + priority: true, + command: ChatCommand::UserMessage { + content: json!("priority 2"), + attachments: vec![], + }, + }); + queue.push_back(CommandRequest { + client_request_id: "req-abort".into(), + priority: true, + command: ChatCommand::Abort {}, + }); + + let drained = drain_priority_user_messages(&mut queue); + assert_eq!(drained.len(), 2); + assert_eq!(drained[0].client_request_id, "req-p1"); + 
assert_eq!(drained[1].client_request_id, "req-p2"); + assert_eq!(queue.len(), 2); + assert_eq!(queue[0].client_request_id, "req-1"); + assert_eq!(queue[1].client_request_id, "req-abort"); + } + + #[test] + fn test_drain_non_priority_user_messages_extracts_all_non_priority() { + let mut queue = VecDeque::new(); + queue.push_back(CommandRequest { + client_request_id: "req-1".into(), + priority: false, + command: ChatCommand::UserMessage { + content: json!("first"), + attachments: vec![], + }, + }); + queue.push_back(CommandRequest { + client_request_id: "req-p".into(), + priority: true, + command: ChatCommand::UserMessage { + content: json!("priority"), + attachments: vec![], + }, + }); + queue.push_back(CommandRequest { + client_request_id: "req-2".into(), + priority: false, + command: ChatCommand::UserMessage { + content: json!("second"), + attachments: vec![], + }, + }); + queue.push_back(CommandRequest { + client_request_id: "req-3".into(), + priority: false, + command: ChatCommand::UserMessage { + content: json!("third"), + attachments: vec![], + }, + }); + + let drained = drain_non_priority_user_messages(&mut queue); + assert_eq!(drained.len(), 3); + assert_eq!(drained[0].client_request_id, "req-1"); + assert_eq!(drained[1].client_request_id, "req-2"); + assert_eq!(drained[2].client_request_id, "req-3"); + assert_eq!(queue.len(), 1); + assert_eq!(queue[0].client_request_id, "req-p"); + } + + #[test] + fn test_drain_priority_skips_non_user_messages() { + let mut queue = VecDeque::new(); + queue.push_back(CommandRequest { + client_request_id: "req-abort".into(), + priority: true, + command: ChatCommand::Abort {}, + }); + queue.push_back(CommandRequest { + client_request_id: "req-params".into(), + priority: true, + command: ChatCommand::SetParams { patch: json!({}) }, + }); + + let drained = drain_priority_user_messages(&mut queue); + assert!(drained.is_empty()); + assert_eq!(queue.len(), 2); + } + + #[test] + fn test_drain_empty_queue() { + let mut queue: VecDeque = VecDeque::new(); + let priority_drained = drain_priority_user_messages(&mut queue); + let non_priority_drained = drain_non_priority_user_messages(&mut queue); + assert!(priority_drained.is_empty()); + assert!(non_priority_drained.is_empty()); + } + + #[test] + fn test_model_switch_clears_previous_response_id() { + let mut thread = ThreadParams::default(); + thread.model = "openai/gpt-4".into(); + thread.previous_response_id = Some("resp_abc123".to_string()); + + let patch = json!({"model": "anthropic/claude-3"}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + + assert!(changed); + assert_eq!(thread.model, "anthropic/claude-3"); + assert_eq!(thread.previous_response_id, None, + "previous_response_id must be cleared on model switch"); + } + + #[test] + fn test_same_model_preserves_previous_response_id() { + let mut thread = ThreadParams::default(); + thread.model = "openai/gpt-4".into(); + thread.previous_response_id = Some("resp_abc123".to_string()); + + let patch = json!({"model": "openai/gpt-4"}); + let (changed, _) = apply_setparams_patch(&mut thread, &patch); + + assert!(!changed); + assert_eq!(thread.previous_response_id, Some("resp_abc123".to_string()), + "previous_response_id should be preserved when model doesn't change"); + } +} diff --git a/refact-agent/engine/src/chat/session.rs b/refact-agent/engine/src/chat/session.rs new file mode 100644 index 000000000..80a623843 --- /dev/null +++ b/refact-agent/engine/src/chat/session.rs @@ -0,0 +1,1537 @@ +use std::collections::{HashMap, VecDeque}; +use 
std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::time::Instant; +use serde_json::json; +use tokio::sync::{broadcast, Mutex as AMutex, Notify, RwLock as ARwLock}; +use tracing::{info, warn}; +use uuid::Uuid; + +use crate::call_validation::{ChatContent, ChatMessage}; +use crate::global_context::GlobalContext; + +use super::types::*; +use super::types::{session_idle_timeout, session_cleanup_interval}; +use super::config::limits; +use super::trajectories::TrajectoryEvent; + +pub type SessionsMap = Arc>>>>; + +pub fn create_sessions_map() -> SessionsMap { + Arc::new(ARwLock::new(HashMap::new())) +} + +impl ChatSession { + pub fn new(chat_id: String) -> Self { + let (event_tx, _) = broadcast::channel(limits().event_channel_capacity); + Self { + chat_id: chat_id.clone(), + thread: ThreadParams { + id: chat_id, + ..Default::default() + }, + messages: Vec::new(), + runtime: RuntimeState::default(), + draft_message: None, + draft_usage: None, + command_queue: VecDeque::new(), + event_seq: 0, + event_tx, + trajectory_events_tx: None, + recent_request_ids: VecDeque::with_capacity(limits().recent_request_ids_capacity), + abort_flag: Arc::new(AtomicBool::new(false)), + queue_processor_running: Arc::new(AtomicBool::new(false)), + queue_notify: Arc::new(Notify::new()), + last_activity: Instant::now(), + trajectory_dirty: false, + trajectory_version: 0, + created_at: chrono::Utc::now().to_rfc3339(), + closed: false, + external_reload_pending: false, + last_prompt_messages: Vec::new(), + cache_guard_snapshot: None, + cache_guard_force_next: false, + task_agent_error: None, + } + } + + pub fn new_with_trajectory( + chat_id: String, + messages: Vec, + thread: ThreadParams, + created_at: String, + ) -> Self { + let (event_tx, _) = broadcast::channel(limits().event_channel_capacity); + Self { + chat_id, + thread, + messages, + runtime: RuntimeState::default(), + draft_message: None, + draft_usage: None, + command_queue: VecDeque::new(), + event_seq: 0, + event_tx, + trajectory_events_tx: None, + recent_request_ids: VecDeque::with_capacity(limits().recent_request_ids_capacity), + abort_flag: Arc::new(AtomicBool::new(false)), + queue_processor_running: Arc::new(AtomicBool::new(false)), + queue_notify: Arc::new(Notify::new()), + last_activity: Instant::now(), + external_reload_pending: false, + trajectory_dirty: false, + trajectory_version: 0, + created_at, + closed: false, + last_prompt_messages: Vec::new(), + cache_guard_snapshot: None, + cache_guard_force_next: false, + task_agent_error: None, + } + } + + pub fn increment_version(&mut self) { + self.trajectory_version += 1; + self.trajectory_dirty = true; + } + + pub fn touch(&mut self) { + self.last_activity = Instant::now(); + } + + pub fn is_idle_for_cleanup(&self) -> bool { + let is_idle_like = matches!( + self.runtime.state, + SessionState::Idle | SessionState::Completed | SessionState::WaitingUserInput + ); + is_idle_like + && self.command_queue.is_empty() + && self.last_activity.elapsed() > session_idle_timeout() + } + + pub fn close_event_channel(&mut self) { + let (new_tx, _) = broadcast::channel(limits().event_channel_capacity); + self.event_tx = new_tx; + } + + pub fn emit(&mut self, event: ChatEvent) { + self.event_seq += 1; + let envelope = EventEnvelope { + chat_id: self.chat_id.clone(), + seq: self.event_seq, + event, + }; + let _ = self.event_tx.send(envelope); + } + + pub fn snapshot(&self) -> ChatEvent { + let mut messages = self.messages.clone(); + if self.runtime.state == SessionState::Generating { + if let Some(ref 
draft) = self.draft_message { + messages.push(draft.clone()); + } + } + let mut runtime = self.runtime.clone(); + runtime.queue_size = self.command_queue.len(); + runtime.queued_items = self.build_queued_items(); + ChatEvent::Snapshot { + thread: self.thread.clone(), + runtime, + messages, + } + } + + pub fn is_duplicate_request(&mut self, request_id: &str) -> bool { + if self.recent_request_ids.contains(&request_id.to_string()) { + return true; + } + if self.recent_request_ids.len() >= 100 { + self.recent_request_ids.pop_front(); + } + self.recent_request_ids.push_back(request_id.to_string()); + false + } + + pub fn add_message(&mut self, mut message: ChatMessage) { + if message.message_id.is_empty() { + message.message_id = Uuid::new_v4().to_string(); + } + let index = self.messages.len(); + self.messages.push(message.clone()); + self.emit(ChatEvent::MessageAdded { message, index }); + self.increment_version(); + self.touch(); + } + + pub fn insert_message(&mut self, index: usize, mut message: ChatMessage) { + if message.message_id.is_empty() { + message.message_id = Uuid::new_v4().to_string(); + } + let insert_idx = index.min(self.messages.len()); + self.messages.insert(insert_idx, message.clone()); + self.emit(ChatEvent::MessageAdded { message, index: insert_idx }); + self.increment_version(); + self.touch(); + } + + pub fn update_message(&mut self, message_id: &str, message: ChatMessage) -> Option { + if let Some(idx) = self + .messages + .iter() + .position(|m| m.message_id == message_id) + { + self.messages[idx] = message.clone(); + self.thread.previous_response_id = None; + self.emit(ChatEvent::MessageUpdated { + message_id: message_id.to_string(), + message, + }); + self.increment_version(); + self.touch(); + return Some(idx); + } + None + } + + pub fn remove_message(&mut self, message_id: &str) -> Option { + if let Some(idx) = self + .messages + .iter() + .position(|m| m.message_id == message_id) + { + let msg = &self.messages[idx]; + let role = msg.role.clone(); + let tool_call_ids: Vec = msg.tool_calls + .as_ref() + .map(|tcs| tcs.iter().map(|tc| tc.id.clone()).collect()) + .unwrap_or_default(); + + self.messages.remove(idx); + self.thread.previous_response_id = None; + self.emit(ChatEvent::MessageRemoved { + message_id: message_id.to_string(), + }); + + if role == "assistant" && !tool_call_ids.is_empty() { + let tool_msg_ids: Vec = self.messages + .iter() + .filter(|m| m.role == "tool" && tool_call_ids.contains(&m.tool_call_id)) + .map(|m| m.message_id.clone()) + .collect(); + + for tid in tool_msg_ids { + if let Some(tool_idx) = self.messages.iter().position(|m| m.message_id == tid) { + self.messages.remove(tool_idx); + self.emit(ChatEvent::MessageRemoved { message_id: tid }); + } + } + } + + self.increment_version(); + self.touch(); + return Some(idx); + } + None + } + + pub fn truncate_messages(&mut self, from_index: usize) { + if from_index < self.messages.len() { + self.messages.truncate(from_index); + self.thread.previous_response_id = None; + self.emit(ChatEvent::MessagesTruncated { from_index }); + self.increment_version(); + self.touch(); + } + } + + pub fn set_runtime_state(&mut self, state: SessionState, error: Option) { + let old_state = self.runtime.state; + let old_error = self.runtime.error.clone(); + let was_paused = old_state == SessionState::Paused; + let had_pause_reasons = !self.runtime.pause_reasons.is_empty(); + + self.runtime.state = state; + self.runtime.paused = state == SessionState::Paused; + self.runtime.error = error.clone(); + 
self.runtime.queue_size = self.command_queue.len(); + self.runtime.queued_items = self.build_queued_items(); + + if state != SessionState::Paused && (was_paused || had_pause_reasons) { + self.runtime.pause_reasons.clear(); + self.runtime.auto_approved_tool_ids.clear(); + self.runtime.accepted_tool_ids.clear(); + self.runtime.paused_message_index = None; + self.emit(ChatEvent::PauseCleared {}); + } + + let state_changed = old_state != state; + let error_changed = old_error != error; + if state_changed || error_changed { + self.emit(ChatEvent::RuntimeUpdated { + state, + error: error.clone(), + }); + self.emit_trajectory_state_change(); + } + } + + fn emit_trajectory_state_change(&self) { + if self.thread.task_meta.is_some() { + return; + } + if let Some(ref tx) = self.trajectory_events_tx { + let state_str = match self.runtime.state { + SessionState::Idle => "idle", + SessionState::Generating => "generating", + SessionState::ExecutingTools => "executing_tools", + SessionState::Paused => "paused", + SessionState::WaitingIde => "waiting_ide", + SessionState::WaitingUserInput => "waiting_user_input", + SessionState::Completed => "completed", + SessionState::Error => "error", + }; + let effective_root = self.thread.root_chat_id.clone().unwrap_or_else(|| self.chat_id.clone()); + let event = TrajectoryEvent { + event_type: "updated".to_string(), + id: self.chat_id.clone(), + updated_at: None, + title: None, + is_title_generated: None, + session_state: Some(state_str.to_string()), + error: self.runtime.error.clone(), + message_count: Some(self.messages.len()), + parent_id: self.thread.parent_id.clone(), + link_type: self.thread.link_type.clone(), + root_chat_id: Some(effective_root), + model: Some(self.thread.model.clone()), + mode: Some(self.thread.mode.clone()), + total_coins: None, + total_lines_added: None, + total_lines_removed: None, + tasks_total: None, + tasks_done: None, + tasks_failed: None, + }; + let _ = tx.send(event); + } + } + + pub fn build_queued_items(&self) -> Vec { + self.command_queue + .iter() + .map(|r| r.to_queued_item()) + .collect() + } + + pub fn emit_queue_update(&mut self) { + self.runtime.queue_size = self.command_queue.len(); + self.runtime.queued_items = self.build_queued_items(); + self.emit(ChatEvent::QueueUpdated { + queue_size: self.runtime.queue_size, + queued_items: self.runtime.queued_items.clone(), + }); + } + + pub fn set_paused_with_reasons_and_auto_approved(&mut self, reasons: Vec, auto_approved_ids: Vec, message_index: Option) { + self.runtime.pause_reasons = reasons.clone(); + self.runtime.auto_approved_tool_ids = auto_approved_ids; + self.runtime.accepted_tool_ids.clear(); + self.runtime.paused_message_index = message_index; + self.emit(ChatEvent::PauseRequired { reasons }); + self.set_runtime_state(SessionState::Paused, None); + } + + pub fn start_stream(&mut self) -> Option<(String, Arc)> { + if self.runtime.state == SessionState::ExecutingTools || self.draft_message.is_some() { + warn!("Attempted to start stream while already executing tools or draft exists"); + return None; + } + self.abort_flag.store(false, Ordering::SeqCst); + let message_id = Uuid::new_v4().to_string(); + self.draft_message = Some(ChatMessage { + message_id: message_id.clone(), + role: "assistant".to_string(), + ..Default::default() + }); + self.draft_usage = None; + self.set_runtime_state(SessionState::Generating, None); + self.emit(ChatEvent::StreamStarted { + message_id: message_id.clone(), + }); + self.touch(); + Some((message_id, self.abort_flag.clone())) + } + + pub fn 
emit_stream_delta(&mut self, ops: Vec) { + let message_id = match &mut self.draft_message { + Some(draft) => { + for op in &ops { + match op { + DeltaOp::AppendContent { text } => match &mut draft.content { + ChatContent::SimpleText(s) => s.push_str(text), + _ => draft.content = ChatContent::SimpleText(text.clone()), + }, + DeltaOp::AppendReasoning { text } => { + let r = draft.reasoning_content.get_or_insert_with(String::new); + r.push_str(text); + } + DeltaOp::SetToolCalls { tool_calls } => { + draft.tool_calls = serde_json::from_value(json!(tool_calls)).ok(); + } + DeltaOp::SetThinkingBlocks { blocks } => { + draft.thinking_blocks = Some(blocks.clone()); + } + DeltaOp::AddCitation { citation } => { + draft.citations.push(citation.clone()); + } + DeltaOp::AddServerContentBlock { block } => { + draft.server_content_blocks.push(block.clone()); + } + DeltaOp::SetUsage { usage } => { + if let Ok(u) = serde_json::from_value(usage.clone()) { + draft.usage = Some(u); + } + } + DeltaOp::MergeExtra { extra } => { + draft.extra.extend(extra.clone()); + } + } + } + draft.message_id.clone() + } + None => return, + }; + self.emit(ChatEvent::StreamDelta { message_id, ops }); + } + + pub fn finish_stream(&mut self, finish_reason: Option) { + if let Some(mut draft) = self.draft_message.take() { + let has_text_content = match &draft.content { + ChatContent::SimpleText(s) => !s.trim().is_empty(), + ChatContent::Multimodal(v) => !v.is_empty(), + ChatContent::ContextFiles(v) => !v.is_empty(), + }; + let has_structured_data = draft.tool_calls.as_ref().map_or(false, |tc| !tc.is_empty()) + || draft + .reasoning_content + .as_ref() + .map_or(false, |r| !r.trim().is_empty()) + || draft + .thinking_blocks + .as_ref() + .map_or(false, |tb| !tb.is_empty()) + || !draft.citations.is_empty() + || !draft.server_content_blocks.is_empty(); + + self.emit(ChatEvent::StreamFinished { + message_id: draft.message_id.clone(), + finish_reason: finish_reason.clone(), + }); + + if has_text_content || has_structured_data { + draft.finish_reason = finish_reason; + if let Some(usage) = self.draft_usage.take() { + draft.usage = Some(usage); + } + self.add_message(draft); + } else { + tracing::warn!("Discarding empty assistant message"); + self.emit(ChatEvent::MessageRemoved { + message_id: draft.message_id, + }); + } + } + self.set_runtime_state(SessionState::Idle, None); + self.touch(); + } + + pub fn finish_stream_with_error(&mut self, error: String) { + if let Some(mut draft) = self.draft_message.take() { + let has_text_content = match &draft.content { + ChatContent::SimpleText(s) => !s.is_empty(), + ChatContent::Multimodal(v) => !v.is_empty(), + ChatContent::ContextFiles(v) => !v.is_empty(), + }; + let has_structured_data = draft.tool_calls.as_ref().map_or(false, |tc| !tc.is_empty()) + || draft + .reasoning_content + .as_ref() + .map_or(false, |r| !r.is_empty()) + || draft + .thinking_blocks + .as_ref() + .map_or(false, |tb| !tb.is_empty()) + || !draft.citations.is_empty() + || !draft.server_content_blocks.is_empty() + || draft.usage.is_some() + || !draft.extra.is_empty(); + + if has_text_content || has_structured_data { + self.emit(ChatEvent::StreamFinished { + message_id: draft.message_id.clone(), + finish_reason: Some("error".to_string()), + }); + draft.finish_reason = Some("error".to_string()); + if let Some(usage) = self.draft_usage.take() { + draft.usage = Some(usage); + } + self.add_message(draft); + } else { + self.emit(ChatEvent::MessageRemoved { + message_id: draft.message_id, + }); + } + } + 
self.set_runtime_state(SessionState::Error, Some(error.clone())); + self.touch(); + + // Store task_meta for async notification (need to clone before async) + self.task_agent_error = Some(error); + } + + pub fn abort_stream(&mut self) { + self.abort_flag.store(true, Ordering::SeqCst); + if let Some(draft) = self.draft_message.take() { + self.emit(ChatEvent::StreamFinished { + message_id: draft.message_id.clone(), + finish_reason: Some("abort".to_string()), + }); + self.emit(ChatEvent::MessageRemoved { + message_id: draft.message_id, + }); + } + self.draft_usage = None; + self.set_runtime_state(SessionState::Idle, None); + self.touch(); + self.queue_notify.notify_one(); + } + + pub fn discard_draft_for_pause(&mut self) { + if let Some(draft) = self.draft_message.take() { + self.emit(ChatEvent::MessageRemoved { + message_id: draft.message_id, + }); + } + self.draft_usage = None; + } + + pub fn subscribe(&self) -> broadcast::Receiver { + self.event_tx.subscribe() + } + + pub fn set_title(&mut self, title: String, is_generated: bool) { + self.thread.title = title.clone(); + self.thread.is_title_generated = is_generated; + self.increment_version(); + self.touch(); + self.emit_trajectory_title_change(title); + } + + fn emit_trajectory_title_change(&self, title: String) { + if self.thread.task_meta.is_some() { + return; + } + if let Some(ref tx) = self.trajectory_events_tx { + let effective_root = self.thread.root_chat_id.clone().unwrap_or_else(|| self.chat_id.clone()); + let event = TrajectoryEvent { + event_type: "updated".to_string(), + id: self.chat_id.clone(), + updated_at: Some(chrono::Utc::now().to_rfc3339()), + title: Some(title), + is_title_generated: Some(self.thread.is_title_generated), + session_state: Some(self.runtime.state.to_string()), + error: self.runtime.error.clone(), + message_count: Some(self.messages.len()), + parent_id: self.thread.parent_id.clone(), + link_type: self.thread.link_type.clone(), + root_chat_id: Some(effective_root), + model: Some(self.thread.model.clone()), + mode: Some(self.thread.mode.clone()), + total_coins: None, + total_lines_added: None, + total_lines_removed: None, + tasks_total: None, + tasks_done: None, + tasks_failed: None, + }; + let _ = tx.send(event); + } + } + + pub fn validate_tool_decision(&self, tool_call_id: &str) -> bool { + self.runtime + .pause_reasons + .iter() + .any(|r| r.tool_call_id == tool_call_id) + } + + pub fn process_tool_decisions(&mut self, decisions: &[ToolDecisionItem]) -> Vec { + let mut accepted_ids = Vec::new(); + let mut denied_ids = Vec::new(); + + for decision in decisions { + if !self.validate_tool_decision(&decision.tool_call_id) { + warn!( + "Tool decision for unknown tool_call_id: {}", + decision.tool_call_id + ); + continue; + } + if decision.accepted { + accepted_ids.push(decision.tool_call_id.clone()); + } else { + denied_ids.push(decision.tool_call_id.clone()); + } + } + + self.runtime.pause_reasons.retain(|r| { + !accepted_ids.contains(&r.tool_call_id) && !denied_ids.contains(&r.tool_call_id) + }); + + if self.runtime.pause_reasons.is_empty() { + self.set_runtime_state(SessionState::Idle, None); + } + + accepted_ids + } +} + +pub async fn get_or_create_session_with_trajectory( + gcx: Arc>, + sessions: &SessionsMap, + chat_id: &str, +) -> Arc> { + let maybe_existing = { + let sessions_read = sessions.read().await; + sessions_read.get(chat_id).cloned() + }; + + if let Some(session_arc) = maybe_existing { + let is_closed = { + let session = session_arc.lock().await; + session.closed + }; + if !is_closed { + 
return session_arc; + } + let mut sessions_write = sessions.write().await; + if let Some(current) = sessions_write.get(chat_id) { + if Arc::ptr_eq(current, &session_arc) { + sessions_write.remove(chat_id); + } + } + } + + let trajectory_events_tx = gcx.read().await.trajectory_events_tx.clone(); + + let (mut session, is_new) = if let Some(mut loaded) = + super::trajectories::load_trajectory_for_chat(gcx.clone(), chat_id).await + { + info!( + "Loaded trajectory for chat {} with {} messages", + chat_id, + loaded.messages.len() + ); + super::trajectories::apply_mode_defaults_to_thread( + gcx.clone(), + &mut loaded.thread, + loaded.auto_approve_editing_tools_present, + loaded.auto_approve_dangerous_commands_present, + ).await; + ( + ChatSession::new_with_trajectory( + chat_id.to_string(), + loaded.messages, + loaded.thread, + loaded.created_at, + ), + false, + ) + } else { + let mut s = ChatSession::new(chat_id.to_string()); + s.increment_version(); + (s, true) + }; + + if is_new { + if let Some(mode_config) = crate::yaml_configs::customization_registry::get_mode_config( + gcx.clone(), + &session.thread.mode, + None, + ).await { + let defaults = &mode_config.thread_defaults; + if let Some(v) = defaults.include_project_info { + session.thread.include_project_info = v; + } + if let Some(v) = defaults.checkpoints_enabled { + session.thread.checkpoints_enabled = v; + } + if let Some(v) = defaults.auto_approve_editing_tools { + session.thread.auto_approve_editing_tools = v; + } + if let Some(v) = defaults.auto_approve_dangerous_commands { + session.thread.auto_approve_dangerous_commands = v; + } + } + } + + session.trajectory_events_tx = trajectory_events_tx.clone(); + + let (session_arc, _inserted) = { + let mut sessions_write = sessions.write().await; + match sessions_write.entry(chat_id.to_string()) { + std::collections::hash_map::Entry::Vacant(e) => { + let arc = Arc::new(AMutex::new(session)); + e.insert(arc.clone()); + (arc, true) + } + std::collections::hash_map::Entry::Occupied(e) => { + (e.get().clone(), false) + } + } + }; + + session_arc +} + +pub fn start_session_cleanup_task(gcx: Arc>) { + tokio::spawn(async move { + let mut interval = tokio::time::interval(session_cleanup_interval()); + loop { + interval.tick().await; + + let sessions = { + let gcx_locked = gcx.read().await; + gcx_locked.chat_sessions.clone() + }; + + let candidates: Vec<(String, Arc>)> = { + let sessions_read = sessions.read().await; + sessions_read + .iter() + .map(|(chat_id, session_arc)| (chat_id.clone(), session_arc.clone())) + .collect() + }; + + let mut to_cleanup = Vec::new(); + for (chat_id, session_arc) in candidates { + let session = session_arc.lock().await; + if session.is_idle_for_cleanup() { + drop(session); + to_cleanup.push((chat_id, session_arc)); + } + } + + if to_cleanup.is_empty() { + continue; + } + + info!("Cleaning up {} idle sessions", to_cleanup.len()); + + for (chat_id, session_arc) in &to_cleanup { + { + let mut session = session_arc.lock().await; + session.closed = true; + session.close_event_channel(); + session.queue_notify.notify_waiters(); + } + { + let mut sessions_write = sessions.write().await; + if let Some(current) = sessions_write.get(chat_id) { + if Arc::ptr_eq(current, session_arc) { + sessions_write.remove(chat_id); + } + } + } + super::trajectories::maybe_save_trajectory(gcx.clone(), session_arc.clone()).await; + info!("Saved trajectory for closed session {}", chat_id); + } + } + }); +} + +#[cfg(test)] +mod tests { + use super::*; + use super::super::types::{ChatCommand, 
CommandRequest}; + use serde_json::json; + use std::time::Instant; + + fn make_session() -> ChatSession { + ChatSession::new("test-chat".to_string()) + } + + #[test] + fn test_new_session_initial_state() { + let session = make_session(); + assert_eq!(session.chat_id, "test-chat"); + assert_eq!(session.thread.id, "test-chat"); + assert_eq!(session.runtime.state, SessionState::Idle); + assert!(session.messages.is_empty()); + assert!(session.draft_message.is_none()); + assert_eq!(session.event_seq, 0); + assert!(!session.trajectory_dirty); + } + + #[test] + fn test_new_with_trajectory() { + let msg = ChatMessage { + role: "user".into(), + content: ChatContent::SimpleText("hello".into()), + ..Default::default() + }; + let thread = ThreadParams { + id: "traj-1".into(), + title: "Old Chat".into(), + ..Default::default() + }; + let session = ChatSession::new_with_trajectory( + "traj-1".into(), + vec![msg.clone()], + thread, + "2024-01-01T00:00:00Z".into(), + ); + assert_eq!(session.chat_id, "traj-1"); + assert_eq!(session.thread.title, "Old Chat"); + assert_eq!(session.messages.len(), 1); + assert_eq!(session.created_at, "2024-01-01T00:00:00Z"); + } + + #[test] + fn test_emit_increments_seq() { + let mut session = make_session(); + assert_eq!(session.event_seq, 0); + session.emit(ChatEvent::PauseCleared {}); + assert_eq!(session.event_seq, 1); + session.emit(ChatEvent::PauseCleared {}); + assert_eq!(session.event_seq, 2); + } + + #[test] + fn test_emit_sends_correct_envelope() { + let mut session = make_session(); + let mut rx = session.subscribe(); + session.emit(ChatEvent::PauseCleared {}); + let envelope = rx.try_recv().unwrap(); + assert_eq!(envelope.chat_id, "test-chat"); + assert_eq!(envelope.seq, 1); + assert!(matches!(envelope.event, ChatEvent::PauseCleared {})); + } + + #[test] + fn test_snapshot_without_draft() { + let mut session = make_session(); + session.messages.push(ChatMessage { + role: "user".into(), + content: ChatContent::SimpleText("hi".into()), + ..Default::default() + }); + let snap = session.snapshot(); + match snap { + ChatEvent::Snapshot { messages, .. } => { + assert_eq!(messages.len(), 1); + } + _ => panic!("Expected Snapshot"), + } + } + + #[test] + fn test_snapshot_includes_draft_when_generating() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![DeltaOp::AppendContent { + text: "partial".into(), + }]); + let snap = session.snapshot(); + match snap { + ChatEvent::Snapshot { + messages, runtime, .. 
+ } => { + assert_eq!(runtime.state, SessionState::Generating); + assert_eq!(messages.len(), 1); + match &messages[0].content { + ChatContent::SimpleText(s) => assert_eq!(s, "partial"), + _ => panic!("Expected SimpleText"), + } + } + _ => panic!("Expected Snapshot"), + } + } + + #[test] + fn test_is_duplicate_request_detects_duplicates() { + let mut session = make_session(); + assert!(!session.is_duplicate_request("req-1")); + assert!(session.is_duplicate_request("req-1")); + assert!(!session.is_duplicate_request("req-2")); + assert!(session.is_duplicate_request("req-2")); + } + + #[test] + fn test_is_duplicate_request_caps_at_100() { + let mut session = make_session(); + for i in 0..100 { + session.is_duplicate_request(&format!("req-{}", i)); + } + assert_eq!(session.recent_request_ids.len(), 100); + session.is_duplicate_request("req-100"); + assert_eq!(session.recent_request_ids.len(), 100); + assert!(!session.recent_request_ids.contains(&"req-0".to_string())); + assert!(session.recent_request_ids.contains(&"req-100".to_string())); + } + + #[test] + fn test_add_message_generates_id_if_empty() { + let mut session = make_session(); + let msg = ChatMessage { + role: "user".into(), + content: ChatContent::SimpleText("hi".into()), + ..Default::default() + }; + session.add_message(msg); + assert!(!session.messages[0].message_id.is_empty()); + assert!(session.trajectory_dirty); + } + + #[test] + fn test_add_message_preserves_existing_id() { + let mut session = make_session(); + let msg = ChatMessage { + message_id: "custom-id".into(), + role: "user".into(), + content: ChatContent::SimpleText("hi".into()), + ..Default::default() + }; + session.add_message(msg); + assert_eq!(session.messages[0].message_id, "custom-id"); + } + + #[test] + fn test_update_message_returns_index() { + let mut session = make_session(); + let msg = ChatMessage { + message_id: "m1".into(), + role: "user".into(), + content: ChatContent::SimpleText("original".into()), + ..Default::default() + }; + session.messages.push(msg); + let updated = ChatMessage { + message_id: "m1".into(), + role: "user".into(), + content: ChatContent::SimpleText("updated".into()), + ..Default::default() + }; + let idx = session.update_message("m1", updated); + assert_eq!(idx, Some(0)); + match &session.messages[0].content { + ChatContent::SimpleText(s) => assert_eq!(s, "updated"), + _ => panic!("Expected SimpleText"), + } + } + + #[test] + fn test_update_message_unknown_id_returns_none() { + let mut session = make_session(); + let msg = ChatMessage::default(); + assert!(session.update_message("unknown", msg).is_none()); + } + + #[test] + fn test_remove_message_returns_index() { + let mut session = make_session(); + session.messages.push(ChatMessage { + message_id: "m1".into(), + ..Default::default() + }); + session.messages.push(ChatMessage { + message_id: "m2".into(), + ..Default::default() + }); + let idx = session.remove_message("m1"); + assert_eq!(idx, Some(0)); + assert_eq!(session.messages.len(), 1); + assert_eq!(session.messages[0].message_id, "m2"); + } + + #[test] + fn test_remove_message_unknown_id_returns_none() { + let mut session = make_session(); + assert!(session.remove_message("unknown").is_none()); + } + + #[test] + fn test_truncate_messages() { + let mut session = make_session(); + for i in 0..5 { + session.messages.push(ChatMessage { + message_id: format!("m{}", i), + ..Default::default() + }); + } + session.truncate_messages(2); + assert_eq!(session.messages.len(), 2); + assert_eq!(session.messages[1].message_id, "m1"); + } + + 
#[test] + fn test_truncate_beyond_length_is_noop() { + let mut session = make_session(); + session.messages.push(ChatMessage::default()); + let version_before = session.trajectory_version; + session.truncate_messages(10); + assert_eq!(session.messages.len(), 1); + assert_eq!(session.trajectory_version, version_before); + } + + #[test] + fn test_start_stream_returns_message_id() { + let mut session = make_session(); + let result = session.start_stream(); + assert!(result.is_some()); + let (msg_id, abort_flag) = result.unwrap(); + assert!(!msg_id.is_empty()); + assert!(!abort_flag.load(std::sync::atomic::Ordering::SeqCst)); + assert_eq!(session.runtime.state, SessionState::Generating); + assert!(session.draft_message.is_some()); + } + + #[test] + fn test_start_stream_fails_if_already_generating() { + let mut session = make_session(); + session.start_stream(); + let result = session.start_stream(); + assert!(result.is_none()); + } + + #[test] + fn test_start_stream_fails_if_executing_tools() { + let mut session = make_session(); + session.set_runtime_state(SessionState::ExecutingTools, None); + let result = session.start_stream(); + assert!(result.is_none()); + } + + #[test] + fn test_emit_stream_delta_appends_content() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![DeltaOp::AppendContent { + text: "Hello".into(), + }]); + session.emit_stream_delta(vec![DeltaOp::AppendContent { + text: " World".into(), + }]); + let draft = session.draft_message.as_ref().unwrap(); + match &draft.content { + ChatContent::SimpleText(s) => assert_eq!(s, "Hello World"), + _ => panic!("Expected SimpleText"), + } + } + + #[test] + fn test_emit_stream_delta_appends_reasoning() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![DeltaOp::AppendReasoning { + text: "think".into(), + }]); + session.emit_stream_delta(vec![DeltaOp::AppendReasoning { text: "ing".into() }]); + let draft = session.draft_message.as_ref().unwrap(); + assert_eq!(draft.reasoning_content.as_ref().unwrap(), "thinking"); + } + + #[test] + fn test_emit_stream_delta_sets_tool_calls() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![DeltaOp::SetToolCalls { + tool_calls: vec![ + json!({"id":"tc1","type":"function","function":{"name":"test","arguments":"{}"}}), + ], + }]); + let draft = session.draft_message.as_ref().unwrap(); + assert!(draft.tool_calls.is_some()); + assert_eq!(draft.tool_calls.as_ref().unwrap().len(), 1); + } + + #[test] + fn test_emit_stream_delta_without_draft_is_noop() { + let mut session = make_session(); + session.emit_stream_delta(vec![DeltaOp::AppendContent { text: "x".into() }]); + assert!(session.draft_message.is_none()); + } + + #[test] + fn test_finish_stream_adds_message() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![DeltaOp::AppendContent { + text: "done".into(), + }]); + session.finish_stream(Some("stop".into())); + assert!(session.draft_message.is_none()); + assert_eq!(session.messages.len(), 1); + assert_eq!(session.messages[0].finish_reason, Some("stop".into())); + assert_eq!(session.runtime.state, SessionState::Idle); + } + + #[test] + fn test_finish_stream_with_error_keeps_content() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![DeltaOp::AppendContent { + text: "partial".into(), + }]); + session.finish_stream_with_error("timeout".into()); + assert_eq!(session.messages.len(), 
1); + assert_eq!(session.messages[0].finish_reason, Some("error".into())); + assert_eq!(session.runtime.state, SessionState::Error); + assert_eq!(session.runtime.error, Some("timeout".into())); + } + + #[test] + fn test_finish_stream_with_error_keeps_structured_data() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![DeltaOp::SetToolCalls { + tool_calls: vec![ + json!({"id":"tc1","type":"function","function":{"name":"test","arguments":"{}"}}), + ], + }]); + session.finish_stream_with_error("error".into()); + assert_eq!(session.messages.len(), 1); + } + + #[test] + fn test_finish_stream_with_error_removes_empty_draft() { + let mut session = make_session(); + let mut rx = session.subscribe(); + session.start_stream(); + session.finish_stream_with_error("error".into()); + assert!(session.messages.is_empty()); + let mut found_removed = false; + while let Ok(env) = rx.try_recv() { + if matches!(env.event, ChatEvent::MessageRemoved { .. }) { + found_removed = true; + } + } + assert!(found_removed); + } + + #[test] + fn test_abort_stream() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![DeltaOp::AppendContent { + text: "partial".into(), + }]); + session.abort_stream(); + assert!(session.draft_message.is_none()); + assert!(session.messages.is_empty()); + assert!(session.abort_flag.load(std::sync::atomic::Ordering::SeqCst)); + assert_eq!(session.runtime.state, SessionState::Idle); + } + + #[test] + fn test_set_runtime_state_clears_pause_on_transition() { + let mut session = make_session(); + session.runtime.pause_reasons.push(PauseReason { + reason_type: "test".into(), + tool_name: "test_tool".into(), + command: "cmd".into(), + rule: "rule".into(), + tool_call_id: "tc1".into(), + integr_config_path: None, + }); + session.set_runtime_state(SessionState::Paused, None); + assert!(!session.runtime.pause_reasons.is_empty()); + session.set_runtime_state(SessionState::Idle, None); + assert!(session.runtime.pause_reasons.is_empty()); + } + + #[test] + fn test_set_paused_with_reasons_and_auto_approved() { + let mut session = make_session(); + let mut rx = session.subscribe(); + let reasons = vec![PauseReason { + reason_type: "confirmation".into(), + tool_name: "shell".into(), + command: "shell".into(), + rule: "ask".into(), + tool_call_id: "tc1".into(), + integr_config_path: None, + }]; + session.set_paused_with_reasons_and_auto_approved(reasons.clone(), vec!["tc2".into()], Some(0)); + assert_eq!(session.runtime.state, SessionState::Paused); + assert_eq!(session.runtime.pause_reasons.len(), 1); + assert_eq!(session.runtime.auto_approved_tool_ids, vec!["tc2".to_string()]); + assert_eq!(session.runtime.paused_message_index, Some(0)); + let mut found_pause_required = false; + while let Ok(env) = rx.try_recv() { + if matches!(env.event, ChatEvent::PauseRequired { .. 
}) { + found_pause_required = true; + } + } + assert!(found_pause_required); + } + + #[test] + fn test_set_title() { + let mut session = make_session(); + session.set_title("New Title".into(), true); + assert_eq!(session.thread.title, "New Title"); + assert!(session.thread.is_title_generated); + assert!(session.trajectory_dirty); + } + + #[test] + fn test_validate_tool_decision() { + let mut session = make_session(); + session.runtime.pause_reasons.push(PauseReason { + reason_type: "test".into(), + tool_name: "test_tool".into(), + command: "cmd".into(), + rule: "rule".into(), + tool_call_id: "tc1".into(), + integr_config_path: None, + }); + assert!(session.validate_tool_decision("tc1")); + assert!(!session.validate_tool_decision("unknown")); + } + + #[test] + fn test_process_tool_decisions_accepts() { + let mut session = make_session(); + session.runtime.pause_reasons.push(PauseReason { + reason_type: "test".into(), + tool_name: "test_tool".into(), + command: "cmd".into(), + rule: "rule".into(), + tool_call_id: "tc1".into(), + integr_config_path: None, + }); + session.runtime.pause_reasons.push(PauseReason { + reason_type: "test".into(), + tool_name: "test_tool".into(), + command: "cmd".into(), + rule: "rule".into(), + tool_call_id: "tc2".into(), + integr_config_path: None, + }); + session.set_runtime_state(SessionState::Paused, None); + let accepted = session.process_tool_decisions(&[ToolDecisionItem { + tool_call_id: "tc1".into(), + accepted: true, + }]); + assert_eq!(accepted, vec!["tc1"]); + assert_eq!(session.runtime.pause_reasons.len(), 1); + assert_eq!(session.runtime.state, SessionState::Paused); + } + + #[test] + fn test_process_tool_decisions_denies() { + let mut session = make_session(); + session.runtime.pause_reasons.push(PauseReason { + reason_type: "test".into(), + tool_name: "test_tool".into(), + command: "cmd".into(), + rule: "rule".into(), + tool_call_id: "tc1".into(), + integr_config_path: None, + }); + session.set_runtime_state(SessionState::Paused, None); + let accepted = session.process_tool_decisions(&[ToolDecisionItem { + tool_call_id: "tc1".into(), + accepted: false, + }]); + assert!(accepted.is_empty()); + assert!(session.runtime.pause_reasons.is_empty()); + assert_eq!(session.runtime.state, SessionState::Idle); + } + + #[test] + fn test_process_tool_decisions_ignores_unknown() { + let mut session = make_session(); + session.runtime.pause_reasons.push(PauseReason { + reason_type: "test".into(), + tool_name: "test_tool".into(), + command: "cmd".into(), + rule: "rule".into(), + tool_call_id: "tc1".into(), + integr_config_path: None, + }); + session.set_runtime_state(SessionState::Paused, None); + let accepted = session.process_tool_decisions(&[ToolDecisionItem { + tool_call_id: "unknown".into(), + accepted: true, + }]); + assert!(accepted.is_empty()); + assert_eq!(session.runtime.pause_reasons.len(), 1); + } + + #[test] + fn test_process_tool_decisions_transitions_to_idle_when_empty() { + let mut session = make_session(); + session.runtime.pause_reasons.push(PauseReason { + reason_type: "test".into(), + tool_name: "test_tool".into(), + command: "cmd".into(), + rule: "rule".into(), + tool_call_id: "tc1".into(), + integr_config_path: None, + }); + session.set_runtime_state(SessionState::Paused, None); + session.process_tool_decisions(&[ToolDecisionItem { + tool_call_id: "tc1".into(), + accepted: true, + }]); + assert!(session.runtime.pause_reasons.is_empty()); + assert_eq!(session.runtime.state, SessionState::Idle); + } + + #[test] + fn test_increment_version() { + 
let mut session = make_session(); + assert_eq!(session.trajectory_version, 0); + assert!(!session.trajectory_dirty); + session.increment_version(); + assert_eq!(session.trajectory_version, 1); + assert!(session.trajectory_dirty); + } + + #[test] + fn test_create_sessions_map() { + let map = create_sessions_map(); + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let read = map.read().await; + assert!(read.is_empty()); + }); + } + + #[test] + fn test_build_queued_items() { + let mut session = make_session(); + session.command_queue.push_back(CommandRequest { + client_request_id: "req-1".into(), + priority: false, + command: ChatCommand::UserMessage { + content: json!("hello"), + attachments: vec![], + }, + }); + session.command_queue.push_back(CommandRequest { + client_request_id: "req-2".into(), + priority: true, + command: ChatCommand::Abort {}, + }); + let items = session.build_queued_items(); + assert_eq!(items.len(), 2); + assert_eq!(items[0].client_request_id, "req-1"); + assert!(!items[0].priority); + assert_eq!(items[0].command_type, "user_message"); + assert_eq!(items[1].client_request_id, "req-2"); + assert!(items[1].priority); + assert_eq!(items[1].command_type, "abort"); + } + + #[test] + fn test_emit_queue_update_syncs_runtime() { + let mut session = make_session(); + session.command_queue.push_back(CommandRequest { + client_request_id: "req-1".into(), + priority: false, + command: ChatCommand::Abort {}, + }); + session.emit_queue_update(); + assert_eq!(session.runtime.queue_size, 1); + assert_eq!(session.runtime.queued_items.len(), 1); + } + + #[test] + fn test_set_runtime_state_syncs_queued_items() { + let mut session = make_session(); + session.command_queue.push_back(CommandRequest { + client_request_id: "req-1".into(), + priority: true, + command: ChatCommand::Abort {}, + }); + session.set_runtime_state(SessionState::Generating, None); + assert_eq!(session.runtime.queued_items.len(), 1); + assert_eq!(session.runtime.queued_items[0].client_request_id, "req-1"); + } + + #[test] + fn test_snapshot_includes_queued_items() { + let mut session = make_session(); + session.command_queue.push_back(CommandRequest { + client_request_id: "req-1".into(), + priority: false, + command: ChatCommand::UserMessage { + content: json!("test"), + attachments: vec![], + }, + }); + let snap = session.snapshot(); + match snap { + ChatEvent::Snapshot { runtime, .. 
} => { + assert_eq!(runtime.queue_size, 1); + assert_eq!(runtime.queued_items.len(), 1); + assert_eq!(runtime.queued_items[0].client_request_id, "req-1"); + } + _ => panic!("Expected Snapshot"), + } + } + + #[test] + fn test_touch_updates_last_activity() { + let mut session = make_session(); + let before = session.last_activity; + std::thread::sleep(std::time::Duration::from_millis(10)); + session.touch(); + assert!(session.last_activity > before); + } + + #[test] + fn test_finish_stream_keeps_server_content_blocks_only_message() { + let mut session = make_session(); + session.start_stream(); + session.emit_stream_delta(vec![ + DeltaOp::AddServerContentBlock { + block: json!({ + "type": "server_tool_use", + "id": "srvtoolu_test", + "name": "web_search", + "input": {"query": "test"} + }), + }, + DeltaOp::AddServerContentBlock { + block: json!({ + "type": "web_search_tool_result", + "tool_use_id": "srvtoolu_test", + "content": [{"type": "web_search_result", "title": "Result", "url": "https://example.com"}] + }), + }, + ]); + session.finish_stream(Some("stop".to_string())); + + assert_eq!(session.messages.len(), 1, + "Server-blocks-only assistant message should be preserved"); + assert_eq!(session.messages[0].server_content_blocks.len(), 2); + assert_eq!(session.messages[0].role, "assistant"); + } + + #[test] + fn test_finish_stream_discards_truly_empty_message() { + let mut session = make_session(); + session.start_stream(); + // No deltas at all + session.finish_stream(Some("stop".to_string())); + + assert_eq!(session.messages.len(), 0, + "Truly empty assistant message should be discarded"); + } + + #[test] + #[ignore] + fn stress_emit_and_snapshot_large_history_baseline() { + const MESSAGE_COUNT: usize = 2_000; + const MESSAGE_SIZE: usize = 2_048; + const SNAPSHOT_RUNS: usize = 200; + + let mut session = make_session(); + + for i in 0..MESSAGE_COUNT { + session.add_message(ChatMessage { + message_id: format!("m{}", i), + role: if i % 2 == 0 { "user".to_string() } else { "assistant".to_string() }, + content: ChatContent::SimpleText("x".repeat(MESSAGE_SIZE)), + ..Default::default() + }); + } + + let emit_start = Instant::now(); + for _ in 0..1_500 { + session.emit(ChatEvent::QueueUpdated { + queue_size: 0, + queued_items: vec![], + }); + } + let emit_elapsed = emit_start.elapsed(); + + let snapshot_start = Instant::now(); + for _ in 0..SNAPSHOT_RUNS { + let snapshot = session.snapshot(); + if let ChatEvent::Snapshot { messages, .. 
} = snapshot { + assert_eq!(messages.len(), MESSAGE_COUNT); + } else { + panic!("Expected Snapshot event"); + } + } + let snapshot_elapsed = snapshot_start.elapsed(); + + println!( + "STRESS_BASELINE session_emit_snapshot: messages={}, msg_size={}, emits=1500, snapshots={}, emit_ms={}, snapshot_ms={}", + MESSAGE_COUNT, + MESSAGE_SIZE, + SNAPSHOT_RUNS, + emit_elapsed.as_millis(), + snapshot_elapsed.as_millis(), + ); + } + + #[test] + #[ignore] + fn stress_broadcast_lag_recovery_baseline() { + let event_count = limits().event_channel_capacity * 3; + + let mut session = make_session(); + let mut slow_rx = session.subscribe(); + + let emit_start = Instant::now(); + for i in 0..event_count { + session.emit(ChatEvent::MessageAdded { + message: ChatMessage { + message_id: format!("lag-{}", i), + role: "assistant".to_string(), + content: ChatContent::SimpleText("delta".to_string()), + ..Default::default() + }, + index: i, + }); + } + let emit_elapsed = emit_start.elapsed(); + + let recv_start = Instant::now(); + let mut received = 0usize; + let mut lagged = 0usize; + loop { + match slow_rx.try_recv() { + Ok(_envelope) => { + received += 1; + } + Err(tokio::sync::broadcast::error::TryRecvError::Lagged(_skipped)) => { + lagged += 1; + } + Err(tokio::sync::broadcast::error::TryRecvError::Empty) + | Err(tokio::sync::broadcast::error::TryRecvError::Closed) => { + break; + } + } + } + let recv_elapsed = recv_start.elapsed(); + + assert!(lagged > 0, "Expected lagged receiver under saturation"); + + println!( + "STRESS_BASELINE broadcast_lag: emitted={}, received={}, lagged_events={}, emit_ms={}, drain_ms={}, channel_capacity={}", + event_count, + received, + lagged, + emit_elapsed.as_millis(), + recv_elapsed.as_millis(), + limits().event_channel_capacity, + ); + } +} diff --git a/refact-agent/engine/src/chat/stream_core.rs b/refact-agent/engine/src/chat/stream_core.rs new file mode 100644 index 000000000..6627f8c8a --- /dev/null +++ b/refact-agent/engine/src/chat/stream_core.rs @@ -0,0 +1,1365 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::time::Instant; +use futures::StreamExt; +use eventsource_stream::Eventsource; +use serde_json::{json, Value}; +use tokio::sync::RwLock as ARwLock; + +use crate::call_validation::ChatUsage; +use crate::caps::BaseModelRecord; +use crate::global_context::GlobalContext; +use crate::llm::{LlmRequest, LlmStreamDelta, get_adapter, safe_truncate}; +use crate::llm::adapter::{AdapterSettings, StreamParseError}; + +use super::types::{DeltaOp, stream_heartbeat, stream_idle_timeout, stream_total_timeout}; +use super::openai_merge::ToolCallAccumulator; + +fn merge_usage(existing: Option, incoming: ChatUsage) -> ChatUsage { + match existing { + None => incoming, + Some(prev) => { + let prev_cache_read = prev.cache_read_tokens.unwrap_or(0); + let incoming_cache_read = incoming.cache_read_tokens.unwrap_or(0); + let cache_read_increased = incoming_cache_read > prev_cache_read; + + let merged_cache_creation = match (prev.cache_creation_tokens, incoming.cache_creation_tokens) { + (Some(a), Some(b)) => Some(std::cmp::max(a, b)), + (Some(a), None) => Some(a), + (None, Some(b)) => Some(b), + (None, None) => None, + }; + let merged_cache_read = match (prev.cache_read_tokens, incoming.cache_read_tokens) { + (Some(a), Some(b)) => Some(std::cmp::max(a, b)), + (Some(a), None) => Some(a), + (None, Some(b)) => Some(b), + (None, None) => None, + }; + + let merged_prompt_tokens = if cache_read_increased { + incoming.prompt_tokens + } 
else if prev.prompt_tokens == 0 && incoming.prompt_tokens > 0 {
+                incoming.prompt_tokens
+            } else {
+                std::cmp::max(prev.prompt_tokens, incoming.prompt_tokens)
+            };
+
+            let merged_completion = std::cmp::max(prev.completion_tokens, incoming.completion_tokens);
+
+            let merged_metering = match (prev.metering_usd, incoming.metering_usd) {
+                (_, Some(b)) => Some(b),
+                (Some(a), None) => Some(a),
+                (None, None) => None,
+            };
+
+            let merged_total = merged_prompt_tokens
+                + merged_completion
+                + merged_cache_creation.unwrap_or(0)
+                + merged_cache_read.unwrap_or(0);
+
+            ChatUsage {
+                prompt_tokens: merged_prompt_tokens,
+                completion_tokens: merged_completion,
+                total_tokens: merged_total,
+                cache_creation_tokens: merged_cache_creation,
+                cache_read_tokens: merged_cache_read,
+                metering_usd: merged_metering,
+            }
+        }
+    }
+}
+
+pub struct StreamRunParams {
+    pub llm_request: LlmRequest,
+    pub model_rec: BaseModelRecord,
+    pub chat_id: Option<String>,
+    pub abort_flag: Option<Arc<AtomicBool>>,
+    pub supports_tools: bool,
+    pub supports_reasoning: bool,
+    pub reasoning_type: Option<String>,
+    pub supports_temperature: bool,
+}
+
+#[derive(Default, Clone)]
+pub struct ChoiceFinal {
+    pub content: String,
+    pub reasoning: String,
+    pub thinking_blocks: Vec<Value>,
+    pub tool_calls_raw: Vec<Value>,
+    pub citations: Vec<Value>,
+    pub server_content_blocks: Vec<Value>,
+    pub extra: serde_json::Map<String, Value>,
+    pub finish_reason: Option<String>,
+    pub usage: Option<ChatUsage>,
+}
+
+pub trait StreamCollector: Send {
+    fn on_delta_ops(&mut self, choice_idx: usize, ops: Vec<DeltaOp>);
+    fn on_usage(&mut self, usage: &ChatUsage);
+    fn on_finish(&mut self, choice_idx: usize, finish_reason: Option<String>);
+}
+
+const THINK_OPEN_TAG: &str = "<think>";
+const THINK_CLOSE_TAG: &str = "</think>";
+
+fn find_ascii_case_insensitive(haystack: &str, needle: &str) -> Option<usize> {
+    if needle.is_empty() {
+        return Some(0);
+    }
+    for (idx, _) in haystack.match_indices('<') {
+        if idx + needle.len() > haystack.len() {
+            continue;
+        }
+        if let Some(candidate) = haystack.get(idx..idx + needle.len()) {
+            if candidate.eq_ignore_ascii_case(needle) {
+                return Some(idx);
+            }
+        }
+    }
+    None
+}
+
+fn split_with_partial_tag_suffix<'a>(text: &'a str, tag: &str) -> (&'a str, &'a str) {
+    if let Some(last_lt) = text.rfind('<') {
+        let suffix = &text[last_lt..];
+        if suffix.len() < tag.len() {
+            if let Some(tag_prefix) = tag.get(..suffix.len()) {
+                if suffix.eq_ignore_ascii_case(tag_prefix) {
+                    return (&text[..last_lt], suffix);
+                }
+            }
+        }
+    }
+    (text, "")
+}
+
+fn push_content_delta(
+    acc: &mut ChoiceAccumulator,
+    ops: &mut Vec<DeltaOp>,
+    text: String,
+    block_index: Option<usize>,
+) {
+    if text.is_empty() {
+        return;
+    }
+    acc.content.push_str(&text);
+    if let Some(idx) = block_index {
+        acc.content_per_block.entry(idx).or_default().push_str(&text);
+    }
+    ops.push(DeltaOp::AppendContent { text });
+}
+
+fn push_reasoning_delta(
+    acc: &mut ChoiceAccumulator,
+    ops: &mut Vec<DeltaOp>,
+    text: String,
+    block_index: Option<usize>,
+) {
+    if text.is_empty() {
+        return;
+    }
+    acc.reasoning.push_str(&text);
+    if let Some(idx) = block_index {
+        acc.reasoning_per_block.entry(idx).or_default().push_str(&text);
+    }
+    ops.push(DeltaOp::AppendReasoning { text });
+}
+
+fn route_append_content_with_think_tags(
+    acc: &mut ChoiceAccumulator,
+    ops: &mut Vec<DeltaOp>,
+    incoming_text: String,
+    block_index: Option<usize>,
+) {
+    if !acc.inside_think_tag && acc.pending_think_parse.is_empty() && !incoming_text.contains('<') {
+        push_content_delta(acc, ops, incoming_text, block_index);
+        return;
+    }
+
+    acc.pending_think_parse.push_str(&incoming_text);
+
+    loop {
+        if acc.inside_think_tag {
+            if let Some(close_idx) =
find_ascii_case_insensitive(&acc.pending_think_parse, THINK_CLOSE_TAG) { + let reasoning_text = acc.pending_think_parse[..close_idx].to_string(); + push_reasoning_delta(acc, ops, reasoning_text, block_index); + let drain_until = close_idx + THINK_CLOSE_TAG.len(); + acc.pending_think_parse.drain(..drain_until); + acc.inside_think_tag = false; + continue; + } + + let (emit, keep) = split_with_partial_tag_suffix(&acc.pending_think_parse, THINK_CLOSE_TAG); + let reasoning_text = emit.to_string(); + let keep_text = keep.to_string(); + push_reasoning_delta(acc, ops, reasoning_text, block_index); + acc.pending_think_parse = keep_text; + break; + } + + if let Some(open_idx) = find_ascii_case_insensitive(&acc.pending_think_parse, THINK_OPEN_TAG) { + let content_text = acc.pending_think_parse[..open_idx].to_string(); + push_content_delta(acc, ops, content_text, block_index); + let drain_until = open_idx + THINK_OPEN_TAG.len(); + acc.pending_think_parse.drain(..drain_until); + acc.inside_think_tag = true; + continue; + } + + let (emit, keep) = split_with_partial_tag_suffix(&acc.pending_think_parse, THINK_OPEN_TAG); + let content_text = emit.to_string(); + let keep_text = keep.to_string(); + push_content_delta(acc, ops, content_text, block_index); + acc.pending_think_parse = keep_text; + break; + } +} + +fn flush_pending_think_parse(acc: &mut ChoiceAccumulator, ops: &mut Vec) { + if acc.pending_think_parse.is_empty() { + return; + } + + let pending = std::mem::take(&mut acc.pending_think_parse); + if acc.inside_think_tag { + push_reasoning_delta(acc, ops, pending, None); + } else { + push_content_delta(acc, ops, pending, None); + } +} + +fn handle_append_content_delta( + acc: &mut ChoiceAccumulator, + ops: &mut Vec, + text: String, + block_index: Option, +) { + if block_index.is_some() { + flush_pending_think_parse(acc, ops); + push_content_delta(acc, ops, text, block_index); + } else { + route_append_content_with_think_tags(acc, ops, text, block_index); + } +} + + +pub async fn run_llm_stream( + gcx: Arc>, + params: StreamRunParams, + collector: &mut C, +) -> Result, String> { + if params.llm_request.params.n.unwrap_or(1) != 1 { + return Err("Streaming with n > 1 is not supported".to_string()); + } + + let (client, slowdown_arc) = { + let gcx_locked = gcx.read().await; + ( + gcx_locked.http_client.clone(), + gcx_locked.http_client_slowdown.clone(), + ) + }; + + let _ = slowdown_arc.acquire().await; + + let wire_format = params.model_rec.wire_format; + let adapter = get_adapter(wire_format); + + let adapter_settings = AdapterSettings { + api_key: params.model_rec.api_key.clone(), + auth_token: params.model_rec.auth_token.clone(), + endpoint: params.model_rec.endpoint.clone(), + extra_headers: params.model_rec.extra_headers.clone(), + model_name: params.model_rec.name.clone(), + supports_tools: params.supports_tools, + supports_reasoning: params.supports_reasoning, + reasoning_type: params.reasoning_type.clone(), + supports_temperature: params.supports_temperature, + supports_max_completion_tokens: params.model_rec.supports_max_completion_tokens, + support_metadata: params.model_rec.support_metadata, + eof_is_done: params.model_rec.eof_is_done, + supports_web_search: params.model_rec.supports_web_search, + }; + + let http_parts = adapter.build_http(¶ms.llm_request, &adapter_settings) + .map_err(|e| format!("Failed to build LLM request: {}", e))?; + + let mut sanitized_for_commit: Option = None; + if let Some(chat_id) = ¶ms.chat_id { + let session_arc_opt = { + let gcx_locked = gcx.read().await; + let 
sessions = gcx_locked.chat_sessions.read().await; + sessions.get(chat_id).cloned() + }; + if let Some(session_arc) = session_arc_opt { + sanitized_for_commit = crate::chat::cache_guard::check_or_pause_cache_guard( + gcx.clone(), + session_arc, + ¶ms.llm_request.model_id, + &http_parts.body, + ) + .await?; + } + } + + if http_parts.url.is_empty() { + return Err("LLM endpoint URL is empty".to_string()); + } + + tracing::debug!( + url = %http_parts.url, + model = %params.llm_request.model_id, + messages_count = params.llm_request.messages.len(), + "LLM streaming request" + ); + + let response = client + .post(&http_parts.url) + .headers(http_parts.headers.clone()) + .header(reqwest::header::ACCEPT, "text/event-stream") + .json(&http_parts.body) + .send() + .await + .map_err(|e| format!("LLM request failed: {}", e))?; + + let status = response.status(); + if !status.is_success() { + let text = response.text().await.unwrap_or_default(); + return Err(format_llm_error_body(&format!("{}", status), &text)); + } + + if let (Some(chat_id), Some(sanitized)) = (¶ms.chat_id, sanitized_for_commit) { + let session_arc_opt = { + let gcx_locked = gcx.read().await; + let sessions = gcx_locked.chat_sessions.read().await; + sessions.get(chat_id).cloned() + }; + if let Some(session_arc) = session_arc_opt { + crate::chat::cache_guard::commit_cache_guard_snapshot(session_arc, sanitized).await; + } + } + + let mut stream = response.bytes_stream().eventsource(); + + let mut accumulators: Vec = vec![ChoiceAccumulator::default()]; + let mut stream_done = false; + + let stream_started_at = Instant::now(); + let mut last_event_at = Instant::now(); + let mut heartbeat = tokio::time::interval(stream_heartbeat()); + heartbeat.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + loop { + if stream_done { + break; + } + let event = tokio::select! 
{ + _ = heartbeat.tick() => { + if let Some(ref flag) = params.abort_flag { + if flag.load(Ordering::SeqCst) { + return Err("Aborted".to_string()); + } + } + if stream_started_at.elapsed() > stream_total_timeout() { + return Err("LLM stream timeout".to_string()); + } + if last_event_at.elapsed() > stream_idle_timeout() { + return Err("LLM stream stalled".to_string()); + } + continue; + } + maybe_event = stream.next() => { + match maybe_event { + Some(Ok(ev)) => ev, + Some(Err(e)) => { + return Err(format!("Stream error: {}", e)); + } + None => { + if !stream_done && !adapter_settings.eof_is_done { + return Err("LLM stream ended unexpectedly without completion signal".to_string()); + } + break; + } + } + } + }; + last_event_at = Instant::now(); + + let deltas = match adapter.parse_stream_chunk(&event.data) { + Ok(d) => d, + Err(StreamParseError::Skip) => continue, + Err(StreamParseError::MalformedChunk(e)) => { + tracing::warn!("Malformed stream chunk: {}", e); + continue; + } + Err(StreamParseError::FatalError(e)) => { + return Err(format!("LLM error: {}", e)); + } + }; + + let acc = &mut accumulators[0]; + let mut ops = Vec::new(); + + for delta in deltas { + match delta { + LlmStreamDelta::AppendContent { text, block_index } => { + handle_append_content_delta(acc, &mut ops, text, block_index); + } + LlmStreamDelta::AppendReasoning { text, block_index } => { + flush_pending_think_parse(acc, &mut ops); + push_reasoning_delta(acc, &mut ops, text, block_index); + } + LlmStreamDelta::SetToolCalls { tool_calls } => { + let tool_calls = if !params.model_rec.auth_token.is_empty() { + tool_calls.into_iter().map(|mut tc| { + strip_mcp_prefix_from_tool_call(&mut tc); + tc + }).collect() + } else { + tool_calls + }; + for tc in &tool_calls { + acc.tool_calls.merge(tc); + } + ops.push(DeltaOp::SetToolCalls { tool_calls: acc.tool_calls.finalize() }); + } + LlmStreamDelta::FinalizeToolCalls { tool_calls } => { + let tool_calls = if !params.model_rec.auth_token.is_empty() { + tool_calls.into_iter().map(|mut tc| { + strip_mcp_prefix_from_tool_call(&mut tc); + tc + }).collect() + } else { + tool_calls + }; + for tc in &tool_calls { + acc.tool_calls.set_final(tc); + } + ops.push(DeltaOp::SetToolCalls { tool_calls: acc.tool_calls.finalize() }); + } + LlmStreamDelta::SetThinkingBlocks { blocks } => { + merge_thinking_blocks(&mut acc.thinking_blocks, blocks); + ops.push(DeltaOp::SetThinkingBlocks { blocks: acc.thinking_blocks.clone() }); + } + LlmStreamDelta::AddCitation { citation } => { + acc.citations.push(citation.clone()); + ops.push(DeltaOp::AddCitation { citation }); + } + LlmStreamDelta::AddServerContentBlock { block } => { + acc.server_content_blocks.push(block.clone()); + ops.push(DeltaOp::AddServerContentBlock { block }); + } + LlmStreamDelta::SetUsage { usage } => { + acc.usage = Some(merge_usage(acc.usage.take(), usage.clone())); + if let Some(ref merged) = acc.usage { + collector.on_usage(merged); + ops.push(DeltaOp::SetUsage { usage: json!(merged) }); + } + } + LlmStreamDelta::SetFinishReason { reason } => { + acc.finish_reason = Some(reason); + } + LlmStreamDelta::MergeExtra { extra } => { + for (k, v) in &extra { + match (acc.extra.get_mut(k), v) { + (Some(Value::Array(existing)), Value::Array(incoming)) => { + existing.extend(incoming.clone()); + } + (Some(Value::Object(existing)), Value::Object(incoming)) => { + // Shallow-merge objects. 
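+                            // One level deep: keys from the incoming object overwrite same-named
+                            // keys in the accumulated object wholesale; nested objects are not
+                            // merged recursively (e.g. merging {"b": 3} into {"a": 1, "b": 2}
+                            // yields {"a": 1, "b": 3}).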
+ for (ik, iv) in incoming { + existing.insert(ik.clone(), iv.clone()); + } + } + _ => { + acc.extra.insert(k.clone(), v.clone()); + } + } + } + ops.push(DeltaOp::MergeExtra { extra }); + } + LlmStreamDelta::Done => { + stream_done = true; + break; + } + } + } + + if !ops.is_empty() { + collector.on_delta_ops(0, ops); + } + } + + for (idx, acc) in accumulators.iter_mut().enumerate() { + let mut tail_ops = Vec::new(); + flush_pending_think_parse(acc, &mut tail_ops); + if !tail_ops.is_empty() { + collector.on_delta_ops(idx, tail_ops); + } + } + + let results: Vec = accumulators + .into_iter() + .enumerate() + .map(|(idx, acc)| { + collector.on_finish(idx, acc.finish_reason.clone()); + // Merge accumulated reasoning text into thinking_blocks. + // Three cases: + // 1) Anthropic: thinking_blocks exist with type="thinking" — merge per-block reasoning + // (for interleaved thinking, each block gets its own text via block_index) + // 2) OpenAI: thinking_blocks exist with type="reasoning" — leave opaque, don't modify + // 3) No blocks but reasoning text exists — create a synthetic reasoning block + let thinking_blocks = if !acc.thinking_blocks.is_empty() && !acc.reasoning.is_empty() { + acc.thinking_blocks.into_iter().map(|mut block| { + if let Some(obj) = block.as_object_mut() { + let is_anthropic_thinking = obj.get("type") + .and_then(|t| t.as_str()) == Some("thinking"); + // Check if thinking text is missing or empty/whitespace. + // LiteLLM sends final signed blocks with empty "thinking": "" + // since the text was already streamed via reasoning_content. + let thinking_is_empty = obj.get("thinking") + .and_then(|v| v.as_str()) + .map_or(true, |s| s.trim().is_empty()); + if is_anthropic_thinking && thinking_is_empty { + // Use per-block reasoning when available (interleaved thinking), + // fall back to global reasoning for single-block case. + let block_idx = obj.get("index").and_then(|v| v.as_u64()); + let reasoning_text = block_idx + .and_then(|idx| acc.reasoning_per_block.get(&idx)) + .unwrap_or(&acc.reasoning); + if !reasoning_text.is_empty() { + obj.insert("thinking".to_string(), json!(reasoning_text.clone())); + } + } + } + block + }).collect() + } else if acc.thinking_blocks.is_empty() && !acc.reasoning.is_empty() { + vec![json!({ + "type": "reasoning", + "summary": [{"type": "summary_text", "text": acc.reasoning.clone()}] + })] + } else { + acc.thinking_blocks + }; + + ChoiceFinal { + content: acc.content, + reasoning: acc.reasoning, + thinking_blocks, + tool_calls_raw: acc.tool_calls.finalize(), + citations: acc.citations, + server_content_blocks: acc.server_content_blocks, + extra: { + let mut extra = acc.extra; + if !acc.content_per_block.is_empty() { + let mut text_blocks: Vec<_> = acc.content_per_block + .into_iter() + .collect(); + text_blocks.sort_by_key(|(idx, _)| *idx); + extra.insert( + "_anthropic_text_blocks".to_string(), + json!(text_blocks.into_iter().map(|(idx, text)| { + json!({"index": idx, "text": text}) + }).collect::>()), + ); + } + extra + }, + finish_reason: acc.finish_reason, + usage: acc.usage, + } + }) + .collect(); + + Ok(results) +} + +/// Merges incoming thinking blocks into the accumulator, deduplicating by: +/// 1. `id` field (if present) +/// 2. `(type, index)` pair (Anthropic signature deltas) +/// 3. `(type, signature)` pair (LiteLLM blocks without index) +/// +/// When a duplicate is found, the existing block's signature is updated +/// to the latest value (handles streaming signature updates). 
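+///
+/// Illustrative sketch of the dedup rules (hypothetical values mirroring the unit
+/// tests below; marked `ignore` because the function is crate-private):
+///
+/// ```ignore
+/// let mut dst = vec![json!({"index": 0, "type": "thinking"})];
+/// // Same (type, index): deduped, and the newer signature replaces the old one.
+/// merge_thinking_blocks(&mut dst, vec![json!({"index": 0, "type": "thinking", "signature": "sig_v2"})]);
+/// assert_eq!(dst.len(), 1);
+/// assert_eq!(dst[0]["signature"], "sig_v2");
+/// // No id, index, or signature: nothing to dedupe on, so the block is appended.
+/// merge_thinking_blocks(&mut dst, vec![json!({"type": "thinking", "thinking": "more"})]);
+/// assert_eq!(dst.len(), 2);
+/// ```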
+pub(crate) fn merge_thinking_blocks(dst: &mut Vec, incoming: Vec) { + for block in incoming { + let block_type = block.get("type").and_then(|v| v.as_str()).unwrap_or(""); + let block_id = block.get("id").and_then(|v| v.as_str()); + let block_index = block.get("index").and_then(|v| v.as_u64()); + let block_sig = block.get("signature").and_then(|v| v.as_str()); + + let existing_idx = if let Some(id) = block_id { + dst.iter().position(|b| b.get("id").and_then(|v| v.as_str()) == Some(id)) + } else if let Some(idx) = block_index { + dst.iter().position(|b| { + b.get("type").and_then(|v| v.as_str()).unwrap_or("") == block_type + && b.get("index").and_then(|v| v.as_u64()) == Some(idx) + }) + } else if let Some(sig) = block_sig { + dst.iter().position(|b| { + b.get("type").and_then(|v| v.as_str()).unwrap_or("") == block_type + && b.get("signature").and_then(|v| v.as_str()) == Some(sig) + }) + } else { + None + }; + + if let Some(pos) = existing_idx { + if let Some(new_sig) = block.get("signature").and_then(|v| v.as_str()) { + if let Some(obj) = dst[pos].as_object_mut() { + obj.insert("signature".to_string(), json!(new_sig)); + } + } + } else { + dst.push(block); + } + } +} + +#[derive(Default)] +struct ChoiceAccumulator { + content: String, + /// Per-block content text for Anthropic interleaved output. + /// Key is the content block index from the stream. + content_per_block: HashMap, + reasoning: String, + /// Per-block reasoning text for Anthropic interleaved thinking. + /// Key is the content block index from the stream. + reasoning_per_block: HashMap, + thinking_blocks: Vec, + tool_calls: ToolCallAccumulator, + citations: Vec, + server_content_blocks: Vec, + extra: serde_json::Map, + finish_reason: Option, + usage: Option, + pending_think_parse: String, + inside_think_tag: bool, +} + +fn strip_mcp_prefix_from_tool_call(tc: &mut serde_json::Value) { + if let Some(func) = tc.get_mut("function") { + if let Some(name) = func.get("name").and_then(|n| n.as_str()).map(|s| s.to_string()) { + if let Some(stripped) = name.strip_prefix("mcp_") { + func["name"] = serde_json::json!(stripped); + } + } + } +} + +pub fn normalize_tool_call(tc: &serde_json::Value) -> Option { + let function = tc.get("function")?; + let name = function + .get("name") + .and_then(|n| n.as_str()) + .filter(|s| !s.is_empty())?; + + let id = tc + .get("id") + .and_then(|i| i.as_str()) + .map(|s| s.to_string()) + .unwrap_or_else(|| { + format!( + "call_{}", + uuid::Uuid::new_v4().to_string().replace("-", "")[..24].to_string() + ) + }); + + let arguments = match function.get("arguments") { + Some(serde_json::Value::String(s)) if s.trim().starts_with('{') => s.clone(), + Some(serde_json::Value::Object(_)) => serde_json::to_string(&function["arguments"]).unwrap_or_else(|_| "{}".to_string()), + _ => "{}".to_string(), + }; + + let tool_type = tc + .get("type") + .and_then(|t| t.as_str()) + .unwrap_or("function") + .to_string(); + + let index = tc.get("index").and_then(|i| i.as_u64()).map(|i| i as usize); + + let extra_content = tc.get("extra_content").filter(|v| !v.is_null()).cloned(); + + Some(crate::call_validation::ChatToolCall { + id, + index, + function: crate::call_validation::ChatToolFunction { + name: name.to_string(), + arguments, + }, + tool_type, + extra_content, + }) +} + +fn format_llm_error_body(status_label: &str, text: &str) -> String { + if let Ok(json) = serde_json::from_str::(text) { + if let Some(detail) = json.get("detail") { + return format!("LLM error ({}): {}", status_label, detail); + } + if let Some(msg) = 
json.pointer("/error/message") { + return format!("LLM error ({}): {}", status_label, msg); + } + if let Some(err_obj) = json.get("error") { + return format!("LLM error ({}): {}", status_label, err_obj); + } + } + let preview = safe_truncate(text, 500); + format!("LLM error ({}): {}", status_label, preview) +} + + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_merge_usage_cache_read_appears_later() { + let prev = ChatUsage { + prompt_tokens: 1500, + completion_tokens: 100, + total_tokens: 1600, + cache_creation_tokens: None, + cache_read_tokens: None, + metering_usd: None, + }; + + let incoming = ChatUsage { + prompt_tokens: 500, + completion_tokens: 200, + total_tokens: 1700, + cache_creation_tokens: None, + cache_read_tokens: Some(1000), + metering_usd: None, + }; + + let merged = merge_usage(Some(prev), incoming); + + assert_eq!(merged.prompt_tokens, 500); + assert_eq!(merged.completion_tokens, 200); + assert_eq!(merged.cache_read_tokens, Some(1000)); + assert_eq!(merged.total_tokens, 1700); + } + + #[test] + fn test_merge_usage_prompt_increases_normally() { + let prev = ChatUsage { + prompt_tokens: 500, + completion_tokens: 100, + total_tokens: 600, + cache_creation_tokens: None, + cache_read_tokens: Some(1000), + metering_usd: None, + }; + + let incoming = ChatUsage { + prompt_tokens: 600, + completion_tokens: 150, + total_tokens: 750, + cache_creation_tokens: None, + cache_read_tokens: Some(1000), + metering_usd: None, + }; + + let merged = merge_usage(Some(prev), incoming); + + assert_eq!(merged.prompt_tokens, 600); + assert_eq!(merged.completion_tokens, 150); + } + + #[test] + fn test_merge_usage_from_none() { + let incoming = ChatUsage { + prompt_tokens: 500, + completion_tokens: 200, + total_tokens: 700, + cache_creation_tokens: Some(100), + cache_read_tokens: Some(200), + metering_usd: None, + }; + + let merged = merge_usage(None, incoming.clone()); + + assert_eq!(merged.prompt_tokens, 500); + assert_eq!(merged.completion_tokens, 200); + assert_eq!(merged.cache_creation_tokens, Some(100)); + assert_eq!(merged.cache_read_tokens, Some(200)); + } + + #[test] + fn test_merge_usage_metering_incoming_wins() { + use crate::call_validation::MeteringUsd; + + let prev = ChatUsage { + prompt_tokens: 500, + completion_tokens: 200, + total_tokens: 700, + cache_creation_tokens: None, + cache_read_tokens: None, + metering_usd: Some(MeteringUsd { + prompt_usd: 0.001, + generated_usd: 0.002, + cache_read_usd: None, + cache_creation_usd: None, + total_usd: 0.003, + }), + }; + + let incoming = ChatUsage { + prompt_tokens: 500, + completion_tokens: 300, + total_tokens: 800, + cache_creation_tokens: None, + cache_read_tokens: None, + metering_usd: Some(MeteringUsd { + prompt_usd: 0.002, + generated_usd: 0.004, + cache_read_usd: None, + cache_creation_usd: None, + total_usd: 0.006, + }), + }; + + let merged = merge_usage(Some(prev), incoming); + + assert!(merged.metering_usd.is_some()); + assert_eq!(merged.metering_usd.unwrap().total_usd, 0.006); + } + + /// Helper: simulate accumulator finalization (same logic as run_llm_stream). 
+ fn finalize_accumulator(acc: ChoiceAccumulator) -> ChoiceFinal { + let thinking_blocks = if !acc.thinking_blocks.is_empty() && !acc.reasoning.is_empty() { + acc.thinking_blocks.into_iter().map(|mut block| { + if let Some(obj) = block.as_object_mut() { + let is_anthropic_thinking = obj.get("type") + .and_then(|t| t.as_str()) == Some("thinking"); + let thinking_is_empty = obj.get("thinking") + .and_then(|v| v.as_str()) + .map_or(true, |s| s.trim().is_empty()); + if is_anthropic_thinking && thinking_is_empty { + let block_idx = obj.get("index").and_then(|v| v.as_u64()); + let reasoning_text = block_idx + .and_then(|idx| acc.reasoning_per_block.get(&idx)) + .unwrap_or(&acc.reasoning); + if !reasoning_text.is_empty() { + obj.insert("thinking".to_string(), json!(reasoning_text.clone())); + } + } + } + block + }).collect() + } else if acc.thinking_blocks.is_empty() && !acc.reasoning.is_empty() { + vec![json!({ + "type": "reasoning", + "summary": [{"type": "summary_text", "text": acc.reasoning.clone()}] + })] + } else { + acc.thinking_blocks + }; + + ChoiceFinal { + content: acc.content, + reasoning: acc.reasoning, + thinking_blocks, + tool_calls_raw: acc.tool_calls.finalize(), + citations: acc.citations, + server_content_blocks: acc.server_content_blocks, + extra: acc.extra, + finish_reason: acc.finish_reason, + usage: acc.usage, + } + } + + #[test] + fn test_litellm_empty_thinking_text_gets_reasoning_merged() { + // LiteLLM sends signed thinking blocks with empty "thinking": "" + // because reasoning was already streamed via reasoning_content. + // The accumulator must merge the accumulated reasoning text in. + let mut acc = ChoiceAccumulator::default(); + acc.reasoning = "Let me think about this step by step...".to_string(); + acc.thinking_blocks = vec![json!({ + "type": "thinking", + "thinking": "", + "signature": "sig_abc123" + })]; + + let result = finalize_accumulator(acc); + + assert_eq!(result.thinking_blocks.len(), 1); + assert_eq!( + result.thinking_blocks[0]["thinking"].as_str().unwrap(), + "Let me think about this step by step..." + ); + assert_eq!(result.thinking_blocks[0]["signature"], "sig_abc123"); + } + + #[test] + fn test_litellm_null_thinking_text_gets_reasoning_merged() { + // Edge case: thinking field is present but null (not a string) + let mut acc = ChoiceAccumulator::default(); + acc.reasoning = "Reasoning text".to_string(); + acc.thinking_blocks = vec![json!({ + "type": "thinking", + "thinking": null, + "signature": "sig_xyz" + })]; + + let result = finalize_accumulator(acc); + + assert_eq!(result.thinking_blocks.len(), 1); + assert_eq!( + result.thinking_blocks[0]["thinking"].as_str().unwrap(), + "Reasoning text" + ); + } + + #[test] + fn test_anthropic_signature_only_block_gets_reasoning() { + // Native Anthropic adapter: signature_delta creates blocks with no "thinking" key. + let mut acc = ChoiceAccumulator::default(); + acc.reasoning = "Deep analysis here".to_string(); + acc.thinking_blocks = vec![json!({ + "index": 0, + "type": "thinking", + "signature": "sig_native" + })]; + + let result = finalize_accumulator(acc); + + assert_eq!(result.thinking_blocks.len(), 1); + assert_eq!( + result.thinking_blocks[0]["thinking"].as_str().unwrap(), + "Deep analysis here" + ); + } + + #[test] + fn test_interleaved_thinking_per_block_reasoning() { + // Anthropic interleaved thinking: multiple thinking blocks at different indices. + // Each block must get only its own reasoning text, not the concatenation. 
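+        // The indices are Anthropic content-block indices, so thinking blocks can sit
+        // at non-adjacent positions (0 and 4 here) when text or tool_use blocks are
+        // interleaved between them in the same response.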
+ let mut acc = ChoiceAccumulator::default(); + acc.reasoning = "First thought...Second thought...".to_string(); + acc.reasoning_per_block.insert(0, "First thought...".to_string()); + acc.reasoning_per_block.insert(4, "Second thought...".to_string()); + acc.thinking_blocks = vec![ + json!({"index": 0, "type": "thinking", "signature": "sig1"}), + json!({"index": 4, "type": "thinking", "signature": "sig2"}), + ]; + + let result = finalize_accumulator(acc); + + assert_eq!(result.thinking_blocks.len(), 2); + assert_eq!( + result.thinking_blocks[0]["thinking"].as_str().unwrap(), + "First thought...", + "Block 0 should get only its own reasoning text" + ); + assert_eq!( + result.thinking_blocks[1]["thinking"].as_str().unwrap(), + "Second thought...", + "Block 4 should get only its own reasoning text" + ); + } + + #[test] + fn test_signature_delta_concatenation() { + // Anthropic sends a single signature_delta per thinking block. + // Some proxies may emit multiple updates; signature must be treated as + // an opaque integrity token, so the latest update must replace the prior. + let mut blocks = vec![json!({ + "index": 0, + "type": "thinking", + })]; + + // First signature chunk + merge_thinking_blocks(&mut blocks, vec![json!({ + "index": 0, + "type": "thinking", + "signature": "abc" + })]); + assert_eq!(blocks[0]["signature"].as_str().unwrap(), "abc"); + + // Second signature chunk — should replace, not concatenate + merge_thinking_blocks(&mut blocks, vec![json!({ + "index": 0, + "type": "thinking", + "signature": "def" + })]); + assert_eq!(blocks[0]["signature"].as_str().unwrap(), "def", + "Signature updates must replace, not concatenate"); + + // Third chunk + merge_thinking_blocks(&mut blocks, vec![json!({ + "index": 0, + "type": "thinking", + "signature": "ghi" + })]); + assert_eq!(blocks[0]["signature"].as_str().unwrap(), "ghi", + "Latest signature update must win"); + } + + #[test] + fn test_thinking_block_with_existing_text_not_overwritten() { + // If a thinking block already has non-empty thinking text (e.g., from LiteLLM + // final chunk that included the text), it should NOT be overwritten. + let mut acc = ChoiceAccumulator::default(); + acc.reasoning = "Streamed reasoning".to_string(); + acc.thinking_blocks = vec![json!({ + "type": "thinking", + "thinking": "Original block text", + "signature": "sig_keep" + })]; + + let result = finalize_accumulator(acc); + + assert_eq!(result.thinking_blocks.len(), 1); + assert_eq!( + result.thinking_blocks[0]["thinking"].as_str().unwrap(), + "Original block text", + "Pre-existing thinking text should be preserved" + ); + } + + #[test] + fn test_redacted_thinking_blocks_unchanged() { + // Redacted thinking blocks should pass through without modification. 
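+        // `redacted_thinking` carries provider-encrypted reasoning in its `data` field
+        // and has no plaintext `thinking` text; it must go back to the API verbatim,
+        // so finalization must not inject accumulated reasoning into it.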
+ let mut acc = ChoiceAccumulator::default(); + acc.reasoning = "Some reasoning".to_string(); + acc.thinking_blocks = vec![ + json!({"type": "thinking", "signature": "sig1"}), + json!({"type": "redacted_thinking", "data": "encrypted_blob"}), + ]; + + let result = finalize_accumulator(acc); + + assert_eq!(result.thinking_blocks.len(), 2); + // thinking block gets reasoning merged + assert_eq!(result.thinking_blocks[0]["thinking"].as_str().unwrap(), "Some reasoning"); + // redacted block untouched + assert_eq!(result.thinking_blocks[1]["type"], "redacted_thinking"); + assert_eq!(result.thinking_blocks[1]["data"], "encrypted_blob"); + assert!(result.thinking_blocks[1].get("thinking").is_none()); + } + + #[test] + fn test_synthetic_reasoning_block_when_no_thinking_blocks() { + // When there are no thinking_blocks but reasoning exists, + // a synthetic reasoning block should be created. + let mut acc = ChoiceAccumulator::default(); + acc.reasoning = "Some reasoning from OpenAI".to_string(); + + let result = finalize_accumulator(acc); + + assert_eq!(result.thinking_blocks.len(), 1); + assert_eq!(result.thinking_blocks[0]["type"], "reasoning"); + } + + #[test] + fn test_whitespace_only_thinking_text_gets_replaced() { + // Whitespace-only thinking text should be treated as empty. + let mut acc = ChoiceAccumulator::default(); + acc.reasoning = "Real reasoning".to_string(); + acc.thinking_blocks = vec![json!({ + "type": "thinking", + "thinking": " \n\t ", + "signature": "sig_ws" + })]; + + let result = finalize_accumulator(acc); + + assert_eq!( + result.thinking_blocks[0]["thinking"].as_str().unwrap(), + "Real reasoning", + "Whitespace-only thinking should be replaced with accumulated reasoning" + ); + } + + #[test] + fn test_cache_guard_sanitize_removes_fields() { + let body = serde_json::json!({ + "messages": [ + {"role": "user", "content": [{"type": "text", "text": "x", "cache_control": {"type": "ephemeral"}}]} + ], + "temperature": 0.2, + "max_tokens": 1000, + "reasoning_effort": "high" + }); + + let sanitized = crate::chat::cache_guard::sanitize_body_for_cache_guard(&body); + assert!(sanitized.get("temperature").is_none()); + assert!(sanitized.get("max_tokens").is_none()); + assert_eq!(sanitized["reasoning_effort"], "high"); + assert!(sanitized["messages"][0]["content"][0] + .get("cache_control") + .is_none()); + } + + #[test] + fn test_cache_guard_append_only_prefix_logic() { + let prev = serde_json::json!({ + "messages": [ + {"role": "user", "content": "a"}, + {"role": "assistant", "content": "b"} + ], + "meta": {"chat_id": "c1"} + }); + let next_ok = serde_json::json!({ + "messages": [ + {"role": "user", "content": "a"}, + {"role": "assistant", "content": "b"}, + {"role": "user", "content": "c"} + ], + "meta": {"chat_id": "c1", "request_attempt_id": "r2"} + }); + let next_bad = serde_json::json!({ + "messages": [ + {"role": "user", "content": "a"}, + {"role": "assistant", "content": "CHANGED"} + ], + "meta": {"chat_id": "c1"} + }); + + assert!(crate::chat::cache_guard::is_append_only_prefix(&prev, &next_ok)); + assert!(!crate::chat::cache_guard::is_append_only_prefix(&prev, &next_bad)); + } + + #[test] + fn test_merge_thinking_blocks_dedupe_by_index() { + let mut dst = vec![ + json!({"index": 0, "type": "thinking", "signature": "sig_v1"}), + ]; + + merge_thinking_blocks(&mut dst, vec![ + json!({"index": 0, "type": "thinking", "signature": "sig_v2"}), + ]); + + assert_eq!(dst.len(), 1, "Same (type, index) should dedupe"); + assert_eq!(dst[0]["signature"], "sig_v2", "Signature should be 
updated to latest"); + } + + #[test] + fn test_merge_thinking_blocks_streaming_signature_does_not_concat() { + // Even if the upstream sends multiple signature updates, we must NOT + // concatenate them: signatures are integrity-checked by the provider. + let mut dst = vec![ + json!({"index": 0, "type": "thinking", "signature": "sig_part1"}), + ]; + + merge_thinking_blocks(&mut dst, vec![ + json!({"index": 0, "type": "thinking", "signature": "sig_part2"}), + ]); + + assert_eq!(dst.len(), 1); + assert_eq!(dst[0]["signature"], "sig_part2", "Signature must be replaced, not concatenated"); + } + + #[test] + fn test_merge_thinking_blocks_different_indices_kept() { + let mut dst = Vec::new(); + + merge_thinking_blocks(&mut dst, vec![ + json!({"index": 0, "type": "thinking", "signature": "sig1"}), + json!({"index": 4, "type": "thinking", "signature": "sig2"}), + ]); + + assert_eq!(dst.len(), 2, "Different indices should produce separate blocks"); + } + + #[test] + fn test_merge_thinking_blocks_dedupe_by_signature_no_index() { + // LiteLLM blocks often have no index — dedupe by (type, signature) + let mut dst = vec![ + json!({"type": "thinking", "thinking": "text", "signature": "sig_abc"}), + ]; + + merge_thinking_blocks(&mut dst, vec![ + json!({"type": "thinking", "thinking": "text", "signature": "sig_abc"}), + ]); + + assert_eq!(dst.len(), 1, "Same (type, signature) without index should dedupe"); + } + + #[test] + fn test_merge_thinking_blocks_different_types_same_index_not_deduped() { + let mut dst = vec![ + json!({"index": 0, "type": "thinking", "signature": "sig1"}), + ]; + + merge_thinking_blocks(&mut dst, vec![ + json!({"index": 0, "type": "redacted_thinking", "data": "encrypted"}), + ]); + + assert_eq!(dst.len(), 2, + "Different types at same index should not dedupe"); + } + + #[test] + fn test_merge_thinking_blocks_signature_added_to_existing() { + // First block has no signature, second adds it + let mut dst = vec![ + json!({"index": 0, "type": "thinking"}), + ]; + + merge_thinking_blocks(&mut dst, vec![ + json!({"index": 0, "type": "thinking", "signature": "sig_new"}), + ]); + + assert_eq!(dst.len(), 1); + assert_eq!(dst[0]["signature"], "sig_new", + "Signature should be added to existing block"); + } + + #[test] + fn test_merge_thinking_blocks_dedupe_by_id() { + let mut dst = vec![ + json!({"id": "block_1", "type": "thinking", "signature": "sig_old"}), + ]; + + merge_thinking_blocks(&mut dst, vec![ + json!({"id": "block_1", "type": "thinking", "signature": "sig_new"}), + ]); + + assert_eq!(dst.len(), 1, "Same id should dedupe"); + assert_eq!(dst[0]["signature"], "sig_new"); + } + + #[test] + fn test_merge_thinking_blocks_no_key_never_dedupes() { + // Blocks with no id, no index, no signature always append + let mut dst = vec![ + json!({"type": "thinking", "thinking": "text1"}), + ]; + + merge_thinking_blocks(&mut dst, vec![ + json!({"type": "thinking", "thinking": "text2"}), + ]); + + assert_eq!(dst.len(), 2, + "Blocks with no dedup key should always append"); + } + + #[test] + fn test_route_append_content_with_think_tags_single_chunk() { + let mut acc = ChoiceAccumulator::default(); + let mut ops = Vec::new(); + + route_append_content_with_think_tags( + &mut acc, + &mut ops, + "before secret after".to_string(), + None, + ); + + assert_eq!(acc.content, "before after"); + assert_eq!(acc.reasoning, "secret"); + assert_eq!(ops.len(), 3); + assert!(matches!(&ops[0], DeltaOp::AppendContent { text } if text == "before ")); + assert!(matches!(&ops[1], DeltaOp::AppendReasoning { text } if text 
== "secret")); + assert!(matches!(&ops[2], DeltaOp::AppendContent { text } if text == " after")); + } + + #[test] + fn test_route_append_content_with_think_tags_split_open_and_close() { + let mut acc = ChoiceAccumulator::default(); + let mut ops = Vec::new(); + + route_append_content_with_think_tags(&mut acc, &mut ops, "before secret after".to_string(), None); + + assert_eq!(acc.content, "before after"); + assert_eq!(acc.reasoning, "secret"); + assert!(!acc.inside_think_tag); + assert!(acc.pending_think_parse.is_empty()); + } + + #[test] + fn test_route_append_content_with_think_tags_case_insensitive() { + let mut acc = ChoiceAccumulator::default(); + let mut ops = Vec::new(); + + route_append_content_with_think_tags( + &mut acc, + &mut ops, + "ABC".to_string(), + None, + ); + + assert_eq!(acc.content, "AC"); + assert_eq!(acc.reasoning, "B"); + } + + #[test] + fn test_flush_pending_think_parse_outside_think_keeps_text() { + let mut acc = ChoiceAccumulator::default(); + let mut ops = Vec::new(); + + route_append_content_with_think_tags(&mut acc, &mut ops, "hello x b y c".to_string(), + None, + ); + + assert_eq!(acc.content, "a b c"); + assert_eq!(acc.reasoning, "xy"); + } + + #[test] + fn test_route_append_content_with_think_tags_close_without_open_is_content() { + let mut acc = ChoiceAccumulator::default(); + let mut ops = Vec::new(); + + route_append_content_with_think_tags(&mut acc, &mut ops, "a b".to_string(), None); + + assert_eq!(acc.content, "a b"); + assert_eq!(acc.reasoning, ""); + } + + #[test] + fn test_handle_append_content_delta_indexed_keeps_tags_as_content() { + let mut acc = ChoiceAccumulator::default(); + let mut ops = Vec::new(); + + handle_append_content_delta( + &mut acc, + &mut ops, + "before secret after".to_string(), + Some(4), + ); + + assert_eq!(acc.content, "before secret after"); + assert_eq!(acc.reasoning, ""); + assert_eq!(acc.content_per_block.get(&4).map(|s| s.as_str()), Some("before secret after")); + } + +} diff --git a/refact-agent/engine/src/scratchpads/system_context.rs b/refact-agent/engine/src/chat/system_context.rs similarity index 68% rename from refact-agent/engine/src/scratchpads/system_context.rs rename to refact-agent/engine/src/chat/system_context.rs index 1ab3f26be..91fb61379 100644 --- a/refact-agent/engine/src/scratchpads/system_context.rs +++ b/refact-agent/engine/src/chat/system_context.rs @@ -7,8 +7,15 @@ use regex::Regex; use git2::Repository; use crate::at_commands::at_tree::TreeNode; -use crate::call_validation::{ChatMessage, ContextFile}; +use crate::call_validation::{ChatMessage, ChatContent, ContextFile}; use crate::files_correction::{get_project_dirs, paths_from_anywhere}; +use crate::memories::{load_memories_by_tags, MemoRecord}; +use crate::chat::config::limits; +use crate::yaml_configs::project_information::{ + load_project_information_config, to_relative_path, +}; + +pub const PROJECT_CONTEXT_MARKER: &str = "project_context"; use crate::files_in_workspace::detect_vcs_for_a_file_path; use crate::global_context::GlobalContext; use crate::git::operations::{get_git_remotes, get_diff_statuses}; @@ -26,34 +33,17 @@ const INSTRUCTION_FILE_PATTERNS: &[&str] = &[ const RECURSIVE_SEARCH_SKIP_DIRS: &[&str] = &[ "node_modules", - ".git", - ".hg", - ".svn", "target", "build", "dist", "out", - ".next", - ".nuxt", "__pycache__", - ".pytest_cache", - ".mypy_cache", "venv", - ".venv", "env", - ".env", "vendor", - ".cargo", - ".rustup", "coverage", - ".coverage", - ".tox", "eggs", "*.egg-info", - ".gradle", - ".idea", - ".vscode", - ".vs", ]; 
const RECURSIVE_SEARCH_MAX_DEPTH: usize = 5; @@ -66,9 +56,28 @@ const INSTRUCTION_DIR_PATTERNS: &[(&str, &[&str])] = &[ (".claude", &["settings.json", "settings.local.json"]), (".refact", &["project_summary.yaml", "instructions.md"]), // VSCode - all shareable configs - (".vscode", &["settings.json", "launch.json", "tasks.json", "extensions.json"]), + ( + ".vscode", + &[ + "settings.json", + "launch.json", + "tasks.json", + "extensions.json", + ], + ), // JetBrains IDEs - shareable configs + workspace.xml (filtered) - (".idea", &["workspace.xml", "vcs.xml", "misc.xml", "modules.xml", "compiler.xml", "encodings.xml", "jarRepositories.xml"]), + ( + ".idea", + &[ + "workspace.xml", + "vcs.xml", + "misc.xml", + "modules.xml", + "compiler.xml", + "encodings.xml", + "jarRepositories.xml", + ], + ), (".idea/runConfigurations", &["*.xml"]), (".idea/codeStyles", &["*.xml"]), (".idea/inspectionProfiles", &["*.xml"]), @@ -81,10 +90,18 @@ const ENV_MARKERS: &[(&str, &str, &str)] = &[ // Python ("venv", "python_venv", "Python virtual environment"), (".venv", "python_venv", "Python virtual environment"), - ("env", "python_venv", "Python virtual environment (generic name)"), + ( + "env", + "python_venv", + "Python virtual environment (generic name)", + ), (".env", "python_venv", "Python virtual environment (hidden)"), ("poetry.lock", "poetry", "Poetry dependency manager"), - ("pyproject.toml", "python_project", "Python project (PEP 517/518)"), + ( + "pyproject.toml", + "python_project", + "Python project (PEP 517/518)", + ), ("Pipfile", "pipenv", "Pipenv environment"), ("Pipfile.lock", "pipenv", "Pipenv environment"), ("requirements.txt", "pip", "Pip requirements"), @@ -234,6 +251,8 @@ pub struct InstructionFile { pub processed_content: Option, #[serde(skip)] pub importance: u8, + #[serde(skip)] + pub max_chars: Option, } const PARENT_DIR_SEARCH_MAX_DEPTH: usize = 10; @@ -275,12 +294,15 @@ impl GitInfo { } if !self.branches.is_empty() { - let other_branches: Vec<_> = self.branches.iter() + let other_branches: Vec<_> = self + .branches + .iter() .filter(|b| Some(*b) != self.current_branch.as_ref()) .take(10) .collect(); if !other_branches.is_empty() { - let branch_list = other_branches.iter() + let branch_list = other_branches + .iter() .map(|b| format!("`{}`", b)) .collect::>() .join(", "); @@ -294,7 +316,9 @@ impl GitInfo { } if !self.remotes.is_empty() { - let remote_list = self.remotes.iter() + let remote_list = self + .remotes + .iter() .map(|(name, url)| format!("`{}` → {}", name, url)) .collect::>() .join(", "); @@ -302,27 +326,33 @@ impl GitInfo { } if !self.staged_files.is_empty() { - lines.push(format!("**Staged** ({} files): {}", + lines.push(format!( + "**Staged** ({} files): {}", self.staged_files.len(), format_file_list(&self.staged_files, 5) )); } if !self.modified_files.is_empty() { - lines.push(format!("**Modified** ({} files): {}", + lines.push(format!( + "**Modified** ({} files): {}", self.modified_files.len(), format_file_list(&self.modified_files, 5) )); } if !self.untracked_files.is_empty() { - lines.push(format!("**Untracked** ({} files): {}", + lines.push(format!( + "**Untracked** ({} files): {}", self.untracked_files.len(), format_file_list(&self.untracked_files, 5) )); } - if self.staged_files.is_empty() && self.modified_files.is_empty() && self.untracked_files.is_empty() { + if self.staged_files.is_empty() + && self.modified_files.is_empty() + && self.untracked_files.is_empty() + { lines.push("**Status**: Clean working directory".to_string()); } @@ -331,7 +361,11 @@ impl GitInfo 
{ } fn format_file_list(files: &[String], max_show: usize) -> String { - let shown: Vec<_> = files.iter().take(max_show).map(|f| format!("`{}`", f)).collect(); + let shown: Vec<_> = files + .iter() + .take(max_show) + .map(|f| format!("`{}`", f)) + .collect(); let remaining = files.len().saturating_sub(max_show); if remaining > 0 { format!("{} (+{} more)", shown.join(", "), remaining) @@ -349,6 +383,7 @@ pub struct SystemContext { pub project_tree: Option, pub environment_instructions: String, pub git_info: Vec, + pub memories: Vec, } impl SystemInfo { @@ -378,7 +413,9 @@ impl SystemInfo { datetime_local: now_local.format("%Y-%m-%d %H:%M:%S").to_string(), datetime_utc: now_utc.format("%Y-%m-%d %H:%M:%S UTC").to_string(), timezone: now_local.format("%Z").to_string(), - shell: std::env::var("SHELL").ok().or_else(|| std::env::var("COMSPEC").ok()), + shell: std::env::var("SHELL") + .ok() + .or_else(|| std::env::var("COMSPEC").ok()), } } @@ -442,7 +479,10 @@ impl SystemInfo { "## System Information".to_string(), format!("- **OS**: {} ({})", self.os_version, self.arch), format!("- **User**: {}@{}", self.username, self.hostname), - format!("- **DateTime**: {} ({})", self.datetime_local, self.timezone), + format!( + "- **DateTime**: {} ({})", + self.datetime_local, self.timezone + ), ]; if let Some(shell) = &self.shell { lines.push(format!("- **Shell**: {}", shell)); @@ -523,6 +563,8 @@ fn check_env_active(env_type: &str, marker_path: &Path) -> bool { } } +const MAX_WORKSPACE_XML_CHARS: usize = 15_000; + fn extract_workspace_xml_important_parts(content: &str) -> Option { let mut configs = Vec::new(); @@ -535,11 +577,6 @@ fn extract_workspace_xml_important_parts(content: &str) -> Option { if let Some(run_manager_match) = re.find(content) { let run_manager_xml = run_manager_match.as_str(); - let selected = Regex::new(r#"selected="([^"]*)""#).ok() - .and_then(|r| r.captures(run_manager_xml)) - .and_then(|c| c.get(1)) - .map(|m| m.as_str().to_string()); - let config_pattern = r#"]*>[\s\S]*?"#; if let Ok(config_re) = Regex::new(config_pattern) { for config_match in config_re.find_iter(run_manager_xml) { @@ -559,34 +596,45 @@ fn extract_workspace_xml_important_parts(content: &str) -> Option { return None; } - let mut result = String::from("# IDE Run Configurations\n"); - if let Some(sel) = selected { - result.push_str(&format!("selected: {}\n", sel)); - } - result.push_str("configurations:\n"); + let mut result = String::from("# IDE Run Configurations\nconfigurations:\n"); - for cfg in configs { - result.push_str(&format!(" - name: {}\n", cfg.name)); - result.push_str(&format!(" type: {}\n", cfg.config_type)); - if !cfg.command.is_empty() { - result.push_str(&format!(" command: {}\n", cfg.command)); - } - if !cfg.workdir.is_empty() { - result.push_str(&format!(" workdir: {}\n", cfg.workdir)); - } - if !cfg.envs.is_empty() { - result.push_str(" env:\n"); - for (k, v) in &cfg.envs { - result.push_str(&format!(" {}: {}\n", k, v)); - } + for cfg in &configs { + if result.len() >= MAX_WORKSPACE_XML_CHARS { + result.push_str(&format!( + " # ... 
and {} more configurations\n", + configs.len() - configs.iter().position(|c| c.name == cfg.name).unwrap_or(0) + )); + break; } - if !cfg.extra.is_empty() { - for (k, v) in &cfg.extra { - result.push_str(&format!(" {}: {}\n", k, v)); - } + + result.push_str(&format!(" - name: {}\n", cfg.name)); + + let env_prefix: String = cfg + .envs + .iter() + .filter(|(k, _)| k != "PYTHONUNBUFFERED") + .map(|(k, v)| format!("{}={}", k, v)) + .collect::>() + .join(" "); + + let command = if !env_prefix.is_empty() && !cfg.command.is_empty() { + format!("{} {}", env_prefix, cfg.command) + } else if !env_prefix.is_empty() { + env_prefix + } else { + cfg.command.clone() + }; + + if !command.is_empty() { + result.push_str(&format!(" command: {}\n", command)); } } + if result.len() > MAX_WORKSPACE_XML_CHARS { + result.truncate(MAX_WORKSPACE_XML_CHARS); + result.push_str("\n# [truncated]\n"); + } + return Some(result); } @@ -595,11 +643,8 @@ fn extract_workspace_xml_important_parts(content: &str) -> Option { struct RunConfig { name: String, - config_type: String, command: String, - workdir: String, envs: Vec<(String, String)>, - extra: Vec<(String, String)>, } fn parse_run_configuration(config_xml: &str) -> Option { @@ -607,20 +652,12 @@ fn parse_run_configuration(config_xml: &str) -> Option { let config_type = extract_xml_attr(config_xml, "type").unwrap_or_default(); let mut command = String::new(); - let mut workdir = String::new(); let mut envs = Vec::new(); - let mut extra = Vec::new(); if let Some(cmd) = extract_option_value(config_xml, "command") { command = cmd; } - if let Some(wd) = extract_option_value(config_xml, "workingDirectory") { - workdir = wd; - } else if let Some(wd) = extract_option_value(config_xml, "WORKING_DIRECTORY") { - workdir = wd; - } - if let Ok(env_re) = Regex::new(r#""#) { for cap in env_re.captures_iter(config_xml) { if let (Some(k), Some(v)) = (cap.get(1), cap.get(2)) { @@ -641,19 +678,6 @@ fn parse_run_configuration(config_xml: &str) -> Option { } } - if config_type.contains("Cargo") { - if let Some(channel) = extract_option_value(config_xml, "channel") { - if channel != "DEFAULT" { - extra.push(("channel".to_string(), channel)); - } - } - if let Some(bt) = extract_option_value(config_xml, "backtrace") { - if bt != "SHORT" { - extra.push(("backtrace".to_string(), bt)); - } - } - } - if config_type.contains("Python") || config_type.contains("Django") { if let Some(script) = extract_option_value(config_xml, "SCRIPT_NAME") { command = script; @@ -673,31 +697,36 @@ fn parse_run_configuration(config_xml: &str) -> Option { Some(RunConfig { name, - config_type, command, - workdir, envs, - extra, }) } fn extract_xml_attr(xml: &str, attr: &str) -> Option { let pattern = format!(r#"{}="([^"]*)""#, regex::escape(attr)); - Regex::new(&pattern).ok() + Regex::new(&pattern) + .ok() .and_then(|re| re.captures(xml)) .and_then(|cap| cap.get(1)) .map(|m| m.as_str().to_string()) } fn extract_option_value(xml: &str, option_name: &str) -> Option { - let pattern = format!(r#" bool { + if dir_name.starts_with('.') { + return true; + } for skip_pattern in RECURSIVE_SEARCH_SKIP_DIRS { if skip_pattern.starts_with("*.") { if let Some(suffix) = skip_pattern.strip_prefix("*.") { @@ -748,6 +777,7 @@ fn find_instruction_files_recursive( source_tool: determine_tool_source(pattern), processed_content: None, importance: determine_importance(&entry_name), + max_chars: None, }); } break; @@ -787,17 +817,23 @@ pub async fn find_instruction_files(project_dirs: &[PathBuf]) -> Vec Vec String { "gemini.md" => 
"gemini".to_string(), ".cursorrules" | ".cursor/rules" => "cursor".to_string(), "global_rules.md" | ".windsurf/rules" => "windsurf".to_string(), - "copilot-instructions.md" | ".github" | ".github/instructions" => "github_copilot".to_string(), + "copilot-instructions.md" | ".github" | ".github/instructions" => { + "github_copilot".to_string() + } ".aider.conf.yml" => "aider".to_string(), "refact.md" | ".refact" => "refact".to_string(), _ => "unknown".to_string(), @@ -911,7 +950,10 @@ fn categorize_config(file_name: &str) -> String { "typescript".to_string() } else if lower.contains("commit") || lower.contains("husky") || lower.contains("pre-commit") { "git_hooks".to_string() - } else if lower.contains("mkdocs") || lower.contains("docusaurus") || lower.contains("book.toml") { + } else if lower.contains("mkdocs") + || lower.contains("docusaurus") + || lower.contains("book.toml") + { "documentation".to_string() } else if lower.contains("env") { "environment".to_string() @@ -949,28 +991,33 @@ pub async fn gather_git_info(project_dirs: &[PathBuf]) -> Vec { match Repository::open(&vcs_root) { Ok(repo) => { - let current_branch = repo.head().ok() + let current_branch = repo + .head() + .ok() .and_then(|h| h.shorthand().map(String::from)); - let branches = repo.branches(Some(git2::BranchType::Local)) + let branches = repo + .branches(Some(git2::BranchType::Local)) .map(|branches| { branches .filter_map(|b| b.ok()) - .filter_map(|(branch, _)| branch.name().ok().flatten().map(String::from)) + .filter_map(|(branch, _)| { + branch.name().ok().flatten().map(String::from) + }) .collect() }) .unwrap_or_default(); let remotes = get_git_remotes(&vcs_root).unwrap_or_default(); - let (staged, unstaged) = get_diff_statuses( - git2::StatusShow::IndexAndWorkdir, - &repo, - false - ).unwrap_or_default(); + let (staged, unstaged) = + get_diff_statuses(git2::StatusShow::IndexAndWorkdir, &repo, false) + .unwrap_or_default(); - let staged_files: Vec = staged.iter() + let staged_files: Vec = staged + .iter() .map(|f| f.relative_path.to_string_lossy().to_string()) + .filter(|p| !path_starts_with_hidden(p)) .collect(); let mut modified_files = Vec::new(); @@ -978,6 +1025,9 @@ pub async fn gather_git_info(project_dirs: &[PathBuf]) -> Vec { for file in &unstaged { let path_str = file.relative_path.to_string_lossy().to_string(); + if path_starts_with_hidden(&path_str) { + continue; + } match file.status { crate::git::FileChangeStatus::ADDED => untracked_files.push(path_str), _ => modified_files.push(path_str), @@ -1065,7 +1115,10 @@ pub fn generate_environment_instructions(environments: &[DetectedEnvironment]) - instructions.push("### Python".to_string()); for env in &python_envs { let active_marker = if env.is_active { " ✓ (active)" } else { "" }; - instructions.push(format!("- **{}**: `{}`{}", env.description, env.path, active_marker)); + instructions.push(format!( + "- **{}**: `{}`{}", + env.description, env.path, active_marker + )); } let has_venv = python_envs.iter().any(|e| e.env_type == "python_venv"); @@ -1080,14 +1133,18 @@ pub fn generate_environment_instructions(environments: &[DetectedEnvironment]) - instructions.push("uv run python ".to_string()); instructions.push("```".to_string()); } else if has_poetry { - instructions.push("**Preferred**: Use `poetry` for Python package management:".to_string()); + instructions + .push("**Preferred**: Use `poetry` for Python package management:".to_string()); instructions.push("```bash".to_string()); instructions.push("poetry install".to_string()); 
instructions.push("poetry run python ".to_string()); instructions.push("```".to_string()); } else if has_venv { if let Some(venv) = python_envs.iter().find(|e| e.env_type == "python_venv") { - instructions.push("**Preferred**: Use the virtual environment directly (no activation needed):".to_string()); + instructions.push( + "**Preferred**: Use the virtual environment directly (no activation needed):" + .to_string(), + ); instructions.push("```bash".to_string()); if cfg!(windows) { instructions.push(format!("{}/Scripts/python.exe ", venv.path)); @@ -1114,7 +1171,8 @@ pub fn generate_environment_instructions(environments: &[DetectedEnvironment]) - instructions.push(String::new()); if has_bun { - instructions.push("**Preferred**: Use `bun` as the runtime/package manager:".to_string()); + instructions + .push("**Preferred**: Use `bun` as the runtime/package manager:".to_string()); instructions.push("```bash".to_string()); instructions.push("bun install".to_string()); instructions.push("bun run - -``` - ---- - -## Architecture - -### High-Level Structure - -``` -┌─────────────────────────────────────────────────────────┐ -│ React Application │ -│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ -│ │ Features │ │ Components │ │ Hooks │ │ -│ │ (Redux) │ │ (UI Layer) │ │ (Logic) │ │ -│ └──────┬───────┘ └──────┬───────┘ └──────┬───────┘ │ -│ │ │ │ │ -│ └──────────────────┴──────────────────┘ │ -│ │ │ -│ ┌───────▼────────┐ │ -│ │ Services │ │ -│ │ RTK Query APIs│ │ -│ └───────┬────────┘ │ -└────────────────────────────┼──────────────────────────────┘ - │ - ┌────────────────┼────────────────┐ - │ │ │ - ┌──────▼──────┐ ┌─────▼─────┐ ┌──────▼──────┐ - │ Local LSP │ │ SmallCloud│ │ IDE (via │ - │ Server │ │ Auth API │ │ postMessage)│ - │ :8001 │ │ │ │ │ - └─────────────┘ └───────────┘ └─────────────┘ -``` - -### Directory Structure - -``` -gui/ -├── src/ -│ ├── features/ # Redux slices + feature components -│ │ ├── Chat/ # Core chat logic (Thread/, actions, selectors) -│ │ ├── History/ # Chat history management -│ │ ├── Checkpoints/ # Workspace rollback system -│ │ ├── Config/ # Global configuration -│ │ ├── Integrations/ # Integration management UI -│ │ ├── Providers/ # LLM provider configuration -│ │ └── ... -│ ├── components/ # Reusable UI components -│ │ ├── Chat/ # Chat container -│ │ ├── ChatContent/ # Message rendering -│ │ ├── ChatForm/ # Input form + controls -│ │ ├── Sidebar/ # Navigation -│ │ └── ... -│ ├── hooks/ # Custom React hooks (60+) -│ ├── services/ # API definitions -│ │ ├── refact/ # LSP server APIs (RTK Query) -│ │ └── smallcloud/ # Cloud auth APIs -│ ├── app/ # Redux store setup -│ ├── events/ # IDE integration types -│ ├── lib/ # Library entry point -│ │ └── render/ # Render function + CSS -│ └── utils/ # Utility functions -├── generated/ # GraphQL codegen output -├── public/ # Static assets -└── dist/ # Build output (git-ignored) -``` - -### Data Flow Patterns - -**1. User Action → State Update → UI Re-render** - -``` -User clicks "Send" - → dispatch(chatAskQuestionThunk) - → sendChat() API call - → streaming chunks arrive - → dispatch(chatResponse) per chunk - → reducer updates state.chat.thread.messages - → React re-renders ChatContent -``` - -**2. IDE Integration (postMessage)** - -``` -IDE Extension ⇄ window.postMessage ⇄ GUI (iframe) - │ │ - ├─ Context updates (active file) ────→│ - │ │ - │←──── Commands (open file, paste) ───┤ -``` - -**3. 
Tool Calling Flow** - -``` -AI suggests tool_call - → Confirmation popup (if not automatic) - → User approves - → Tool executed (LSP or IDE) - → Result message inserted - → AI continues with result -``` - ---- +React chat UI for AI coding assistant. Builds to `dist/chat/` (browser UMD) and `dist/events/` (Node.js types). Consumed by IDEs (VSCode, JetBrains) and standalone web. ## Tech Stack -### Core Technologies - -| Layer | Technology | Purpose | -| -------------------- | ---------------------------------- | ----------------------------- | -| **UI Framework** | React 18.2 | Component-based UI | -| **Language** | TypeScript 5.8 (strict mode) | Type safety | -| **Build Tool** | Vite 5.0 + SWC | Fast dev server & bundling | -| **State Management** | Redux Toolkit 2.2 | Global state + caching | -| **Data Fetching** | RTK Query | API layer with auto-caching | -| **GraphQL** | urql 4.2 (SmallCloud only) | Auth/user/teams queries | -| **Styling** | CSS Modules + Radix Themes | Scoped styles + design system | -| **UI Components** | Radix UI | Accessible primitives | -| **Testing** | Vitest 3.1 + React Testing Library | Unit & integration tests | -| **Mocking** | MSW 2.3 | API mocking for tests/stories | -| **Storybook** | Storybook 7.6 | Component development | - -### Key Dependencies - -**State & Data** - -- `@reduxjs/toolkit` - Modern Redux with `combineSlices`, RTK Query, middleware -- `redux-persist` - Persist chat history to localStorage -- `urql` - GraphQL client (SmallCloud API only, not for chat) -- `uuid` - Generate chat/message IDs - -**UI Components** - -- `@radix-ui/react-*` - Accordion, Toolbar, Collapsible, Icons -- `@radix-ui/themes` - Design system (colors, spacing, typography) -- `framer-motion` - Animations -- `lottie-react` - Animated icons - -**Utilities** - -- `react-markdown` + `remark-gfm` + `rehype-katex` - Markdown rendering -- `react-syntax-highlighter` - Code highlighting -- `diff` - Generate diffs for file changes -- `echarts-for-react` - Usage statistics charts -- `react-dropzone` - File upload -- `textarea-caret` - Cursor position (autocomplete) - -### Build Configuration - -**Vite Config** (`vite.config.ts`) - -```typescript -{ - plugins: [react(), eslint(), dts()], - build: { - lib: { - entry: 'src/lib/index.ts', // Browser bundle - name: 'RefactChat', - fileName: 'index' - }, - outDir: 'dist/chat' - }, - server: { - proxy: { - '/v1': process.env.REFACT_LSP_URL ?? 'http://127.0.0.1:8001' - } - } -} -``` - -**Dual Build**: Separate config for Node.js types (`vite.node.config.ts` → `dist/events/`) - -**TypeScript Config** - -```typescript -{ - compilerOptions: { - target: 'ES2020', - module: 'ESNext', - moduleResolution: 'bundler', - strict: true, // Full strict mode - jsx: 'react-jsx', - plugins: [ - { name: 'typescript-plugin-css-modules' }, // CSS typing - { name: '@0no-co/graphqlsp' } // GraphQL intellisense - ] - } -} -``` - -**ESLint**: `@typescript-eslint/strict-type-checked` (aggressive type checking) - ---- - -## Getting Started +React 18.2 · TypeScript 5.8 (strict) · Vite 5.0 · Redux Toolkit 2.2 (RTK Query) · Radix UI/Themes · CSS Modules · urql 4.2 (GraphQL, SmallCloud only) · Vitest 3.1 · MSW 2.3 -### Prerequisites - -1. **Node.js 18+** (uses ES2020 features) -2. 
**Refact LSP Server** running on `http://127.0.0.1:8001` - - Required for chat, tools, caps endpoints - - Get it: https://github.com/smallcloudai/refact-lsp - -### Initial Setup - -```bash -# Install dependencies -npm ci - -# Start dev server -npm run dev -# → http://localhost:5173 - -# With custom LSP URL -REFACT_LSP_URL="http://localhost:8001" npm run dev -``` - -### Environment Variables - -| Variable | Purpose | Default | -| ----------------- | -------------------- | ----------------------- | -| `REFACT_LSP_URL` | Dev proxy target | `http://127.0.0.1:8001` | -| `DEBUG` | Enable debug logging | (unset) | -| `REFACT_LSP_PORT` | Runtime LSP port | `8001` | - -**Debug mode:** +## Quick Start ```bash -DEBUG=refact,app,integrations npm run dev -``` - -### Available Scripts - -```json -{ - "dev": "vite", // Dev server (5173) - "build": "tsc && vite build && vite build -c vite.node.config.ts", - "preview": "vite preview", // Preview production build - "test": "vitest", // Run tests (watch mode) - "test:no-watch": "vitest run", // CI tests - "test:ui": "vitest --ui", // Visual test runner - "coverage": "vitest run --coverage", // Coverage report - "storybook": "storybook dev -p 6006", // Component explorer - "build-storybook": "storybook build", // Static storybook - "lint": "eslint . --ext ts,tsx", // Type-aware linting - "types": "tsc --noEmit", // Type checking only - "format": "prettier . --write", // Auto-format - "generate:graphql": "graphql-codegen", // Generate GraphQL types - "alpha:publish": "npm publish --tag alpha" -} -``` - -### First Time Setup Checklist - -- [ ] `npm ci` completes successfully -- [ ] LSP server is running (check `http://127.0.0.1:8001/v1/ping`) -- [ ] Dev server starts: `npm run dev` -- [ ] Navigate to `http://localhost:5173` -- [ ] Chat interface loads without errors -- [ ] Can send a test message (requires API key or local model) -- [ ] Storybook works: `npm run storybook` -- [ ] Tests pass: `npm run test:no-watch` - -### Project Configuration Files - -``` -gui/ -├── package.json # Dependencies & scripts -├── tsconfig.json # TypeScript compiler options -├── tsconfig.node.json # Node-specific TS config -├── vite.config.ts # Main Vite config (browser) -├── vite.node.config.ts # Node types build -├── .eslintrc.cjs # ESLint rules -├── .prettierrc # (if exists) Code formatting -├── codegen.ts # GraphQL code generation -├── .storybook/ # Storybook configuration -│ ├── main.ts -│ └── preview.tsx -└── .husky/ # Git hooks - └── pre-commit # Runs lint-staged -``` - -**Lint-staged** (pre-commit): - -```json -{ - "*.{ts,tsx}": ["prettier --write", "eslint --cache --fix"], - "*.{js,css,md}": "prettier --write" -} -``` - ---- - -## Message Flow & Streaming - -### Overview - -The chat system uses **Server-Sent Events (SSE)** over HTTP fetch streams for real-time AI responses. - -### Complete Flow Timeline - -``` -1. User types message & clicks Send - ↓ -2. dispatch(chatAskQuestionThunk({messages, chatId, mode})) - → src/features/Chat/Thread/actions.ts:335 - ↓ -3. formatMessagesForLsp(messages) - → Converts internal format to LSP format - → Filters out UI-only fields - ↓ -4. sendChat({messages, model, stream: true, abortSignal, ...}) - → src/services/refact/chat.ts:146 - → POST http://127.0.0.1:8001/v1/chat - → Body: {messages, model, stream: true, meta: {chat_id, chat_mode}} - ↓ -5. response.body.getReader() → ReadableStream - ↓ -6. consumeStream(reader, signal, onAbort, onChunk) - → src/features/Chat/Thread/utils.ts:886 - → Decodes SSE format: "data: {json}\n\n" - ↓ -7. 
For each chunk: onChunk(json) - → dispatch(chatResponse({...json, id: chatId})) - ↓ -8. Reducer: case chatResponse (reducer.ts:207) - → formatChatResponse(state.thread.messages, payload) - → Updates messages array immutably - → Sets streaming: true, waiting_for_response: false - ↓ -9. ChatContent component re-renders with updated messages - → Renders incrementally as content streams - ↓ -10. Stream ends: "data: [DONE]" or error - → dispatch(doneStreaming({id: chatId})) - → postProcessMessagesAfterStreaming() - → streaming: false, read: true -``` - -### SSE Stream Format - -**Protocol**: Server-Sent Events via ReadableStream - -``` -data: {"choices":[{"delta":{"role":"assistant","content":"Hello"},"finish_reason":null}]}\n\n -data: {"choices":[{"delta":{"content":" world"},"finish_reason":null}]}\n\n -data: {"choices":[{"delta":{},"finish_reason":"stop"}],"usage":{"total_tokens":50}}\n\n -data: [DONE]\n\n -``` - -**Special markers:** - -- `data: [DONE]` - Stream complete -- `data: [ERROR]` - Generic error -- `data: {"detail":"..."}` - Structured error (LiteLLM format) -- `data: {"error":{"message":"..."}}` - LiteLLM streaming error - -### The `consumeStream` Function - -**Location**: `src/features/Chat/Thread/utils.ts:886` - -**Key features:** - -1. **Malformed chunk handling** - If buffer doesn't end with `\n\n`, combines with next chunk -2. **Error detection** - Checks for `{"detail":...}` at byte level before parsing -3. **Robust parsing** - Falls back to buffer combination on JSON parse errors -4. **Abort handling** - Respects AbortSignal for user cancellation - -```typescript -export function consumeStream( - reader: ReadableStreamDefaultReader, - signal: AbortSignal, - onAbort: () => void, - onChunk: (chunk: Record) => void, -) { - const decoder = new TextDecoder(); - - function pump({ - done, - value, - }: ReadableStreamReadResult): Promise { - if (done) return Promise.resolve(); - if (signal.aborted) { - onAbort(); - return Promise.resolve(); - } - - // Decode bytes to string - const streamAsString = decoder.decode(value); - - // Split by SSE delimiter - const deltas = streamAsString.split("\n\n").filter((str) => str.length > 0); - - for (const delta of deltas) { - if (!delta.startsWith("data: ")) continue; - - const maybeJsonString = delta.substring(6); // Remove "data: " - - if (maybeJsonString === "[DONE]") return Promise.resolve(); - if (maybeJsonString === "[ERROR]") - return Promise.reject(new Error("error from lsp")); - - // Parse JSON - const json = parseOrElse>(maybeJsonString, {}); - onChunk(json); - } - - return reader.read().then(pump); // Recursive read - } - - return reader.read().then(pump); -} +npm run test:all # CI +npm run lint # eslint strict-type-checked +npm run types # tsc --noEmit +DEBUG=* npm run dev # debug logging ``` -### The `formatChatResponse` Function - -**Location**: `src/features/Chat/Thread/utils.ts:331-650` (320 lines!) 
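The special stream markers listed above (`[DONE]`, `[ERROR]`, `{"detail": ...}`, `{"error": {"message": ...}}`) arrive interleaved with ordinary delta chunks, so it helps to classify each `data:` payload before treating it as a delta. A minimal sketch, assuming only the marker strings and error shapes documented above; the helper name and return type are illustrative, not code from the repo:

```typescript
// Classify one SSE "data:" payload before parsing it as a streaming delta.
// Marker strings and error shapes follow the formats documented above;
// the helper itself is a hypothetical sketch, not code from the repo.
type StreamSignal =
  | { kind: "done" }
  | { kind: "partial" } // malformed JSON: keep it in the buffer, retry with next chunk
  | { kind: "error"; message: string }
  | { kind: "delta"; json: Record<string, unknown> };

export function classifyStreamPayload(payload: string): StreamSignal {
  if (payload === "[DONE]") return { kind: "done" };
  if (payload === "[ERROR]") return { kind: "error", message: "error from lsp" };

  let parsed: unknown;
  try {
    parsed = JSON.parse(payload);
  } catch {
    return { kind: "partial" };
  }
  if (parsed === null || typeof parsed !== "object") return { kind: "partial" };

  const obj = parsed as Record<string, unknown>;
  // LiteLLM-style structured errors: {"detail": "..."} or {"error": {"message": "..."}}
  if (typeof obj.detail === "string") return { kind: "error", message: obj.detail };
  const err = obj.error as { message?: unknown } | undefined;
  if (err && typeof err.message === "string") {
    return { kind: "error", message: err.message };
  }
  return { kind: "delta", json: obj };
}
```

A malformed payload surfaces as `partial` so the caller can keep it buffered and combine it with the next chunk, matching the buffering behavior described for `consumeStream`.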
- -**Purpose**: Merge streaming delta into existing messages array - -**Response Types Handled:** - -| Type | Detection | Action | -| ---------------------- | ---------------------- | -------------------------------------------- | -| `UserResponse` | `role: "user"` | Replace last user message (compression hint) | -| `ContextFileResponse` | `role: "context_file"` | Append context files | -| `SubchatResponse` | Has `subchat_id` | Update tool call with subchat ID | -| `ToolResponse` | `role: "tool"` | Append tool result message | -| `DiffResponse` | `role: "diff"` | Append diff chunks | -| `PlainTextResponse` | `role: "plain_text"` | Append plain text message | -| `SystemResponse` | `role: "system"` | **Prepend** to messages (goes first) | -| **ChatResponseChoice** | Has `choices[]` | **Merge delta into assistant message** ⭐ | - -**Delta Types (in `choices[0].delta`):** +## Architecture -```typescript -delta: { - role?: "assistant", - content?: string, // Main response text - reasoning_content?: string, // Separate reasoning field - tool_calls?: ToolCall[], // Function calls - thinking_blocks?: ThinkingBlock[], // COT blocks - provider_specific_fields?: { - citation?: WebSearchCitation // Web search results - } -} ``` - -**Merging Logic:** - -1. **Content delta** - Concatenate strings: `prevContent + delta.content` -2. **Tool calls delta** - `mergeToolCalls(prev, add)`: - - If new tool (has `function.name`), append - - If continuation (only `arguments`), concat to last tool's arguments - - Handle missing IDs (generate UUID) - - Handle broken indexes (Qwen3/sglang quirks) -3. **Thinking blocks** - `mergeThinkingBlocks(prev, add)`: - - Always merge into first block - - Concat `thinking` and `signature` strings -4. **Citations** - Append to array (web search links) -5. **Usage/Metering** - Take highest values (later chunks have final counts) - -**Post-processing** (`postProcessMessagesAfterStreaming`): - -- Deduplicate tool calls -- Filter out server-executed tools (`srvtoolu_*` prefix) -- Clean up incomplete tool calls - -### State Transitions - -```typescript -// Initial state -{ - streaming: false, - waiting_for_response: false, - prevent_send: false, - thread: { messages: [] } -} - -// After submit -dispatch(chatAskedQuestion) → -{ - waiting_for_response: true, // Blocks duplicate sends - prevent_send: false -} - -// First chunk arrives -dispatch(chatResponse) → -{ - streaming: true, // UI shows streaming indicator - waiting_for_response: false, - thread: { messages: [{role: "assistant", content: "H"}] } -} - -// More chunks -dispatch(chatResponse) x N → -{ - streaming: true, - thread: { messages: [{role: "assistant", content: "Hello world..."}] } -} - -// Stream completes -dispatch(doneStreaming) → -{ - streaming: false, - waiting_for_response: false, - prevent_send: false, // Allow next message - thread: { read: true, messages: [...] 
} // Mark as read -} - -// Error -dispatch(chatError) → -{ - streaming: false, - waiting_for_response: false, - prevent_send: true, // Block sends until error cleared - error: "Error message" -} +React App → Redux (RTK Query) → LSP Server (:8001) [chat, tools, caps, models] + → SmallCloud (GraphQL) [auth, teams, surveys] + → IDE (postMessage) [file ops, theme, context] ``` -### Tool Loop Detection +### Directory Layout -**Problem**: AI might call same tool repeatedly with same args (infinite loop) - -**Solution**: `checkForToolLoop(messages)` (actions.ts:293) - -- Scans recent assistant+tool messages -- Detects duplicate tool calls with identical results -- Sets `only_deterministic_messages: true` to stop streaming - -### Queued Messages (Priority System) - -**Feature**: User can send multiple messages while streaming - -```typescript -type QueuedUserMessage = { - id: string; - message: UserMessage; - createdAt: number; - priority?: boolean; // Send immediately after current stream ends -}; - -// Regular queue: waits for tools to complete -// Priority queue: sends right after streaming (next turn) ``` - -**Hook**: `useAutoSend()` in `useSendChatRequest.ts:362-477` - -- Monitors `queuedMessages`, `streaming`, `hasUnsentTools` -- Auto-flushes when appropriate conditions met -- Priority messages bypass tool completion wait - ---- +src/ +├── app/ # Store (combineSlices), middleware (50+ listeners), storage +├── features/ # Redux slices + feature UIs +│ ├── Chat/Thread/ # Multi-thread: reducer, selectors (~40+), actions, types +│ ├── Checkpoints/ # Workspace rollback +│ ├── CoinBalance/ # Token/credit balance +│ ├── Config/ # Global settings + FeatureMenu +│ ├── Connection/ # SSE connection status +│ ├── Customization/# Agent modes, subagent forms, tool parameter editor +│ ├── FIM/ # Fill-in-Middle debug +│ ├── History/ # Chat history +│ ├── Integrations/ # Integration config +│ ├── Knowledge/ # Memory system + knowledge graph view +│ ├── Login/ # Login page +│ ├── Pages/ # Navigation stack +│ ├── PatchesAndDiffsTracker/ +│ ├── Providers/ # LLM provider config + OAuth +│ ├── Statistics/ # Usage charts +│ ├── Tasks/ # Task management +│ ├── Teams/ # Team/group management +│ ├── ThreadHistory/# Thread history view +│ └── UserSurvey/ +├── components/ # Reusable UI (50+ dirs) +│ ├── ChatContent/ # Message rendering (ChatContent, ToolsContent, DiffContent) +│ ├── ChatForm/ # Input form + ToolConfirmation +│ ├── FIMDebug/ # FIM debug panel +│ ├── IntegrationsView/ # Integration UI + Docker + MCP logs +│ ├── Providers/ # ProviderForm, ProviderOAuth, ModelCard +│ ├── Sidebar/ # Navigation +│ ├── Tour/ # Onboarding (Welcome, TourBubble) +│ ├── Trajectory/ # Trajectory popover +│ └── UsageCounter/ # Token tracking, streaming counter +├── hooks/ # 72+ custom hooks +├── services/ # RTK Query APIs (20+) + chat commands/subscription +│ ├── refact/ # LSP APIs (caps, tools, docker, integrations, etc.) 
+│ └── smallcloud/ # Cloud auth (GraphQL) +├── contexts/ # AbortControllers, InternalLink +├── events/ # IDE integration event types + setup +├── lib/ # Library entry (render + events export) +├── utils/ # Utilities (@-command parsing, token calc, test helpers) +├── __tests__/ # 15+ test files (SSE protocol, integration, slices) +└── __fixtures__/ # 20+ fixture files for tests +``` + +## Chat Flow (Command/Event SSE) + +``` +User sends → POST /v1/chats/{chatId}/commands {type: "user_message", content} + → Backend processes, streams via SSE + → GET /v1/chats/subscribe?chat_id={id} + → Events: snapshot → stream_started → stream_delta* → stream_finished + → dispatch(applyChatEvent) per event → reducer updates state → React re-renders +``` + +### SSE Event Types + +| Event | Purpose | +| ------------------------------- | --------------------------------- | +| `snapshot` | Full state sync (resets seq to 0) | +| `stream_started` | AI response beginning | +| `stream_delta` | Incremental content (DeltaOp[]) | +| `stream_finished` | Complete with usage stats | +| `message_added/updated/removed` | Message CRUD | +| `messages_truncated` | Messages trimmed | +| `thread_updated` | Thread metadata changed | +| `runtime_updated` | Runtime flags changed | +| `pause_required/cleared` | Tool confirmation | +| `ide_tool_required` | IDE tool execution needed | +| `subchat_update` | Nested chat update | +| `queue_updated` | Command queue changed | +| `ack` | Command acknowledgment | + +### Delta Operations + +`append_content` · `append_reasoning` · `set_tool_calls` · `set_thinking_blocks` · `add_citation` · `add_server_content_block` · `set_usage` · `merge_extra` + +### Command Types (POST /v1/chats/{chatId}/commands) + +`user_message` · `abort` · `regenerate` · `update_message` · `remove_message` · `tool_decision` · `tool_decisions` · `ide_tool_result` · `set_params` · `retry_from_index` · `branch_from_chat` + +### Sequence Validation + +Every event has a `seq` number. `snapshot` resets to 0, each subsequent increments by 1. Gap detected → immediate reconnect for fresh snapshot. ## State Management -### Redux Architecture - -**Modern Redux Toolkit** with `combineSlices` (not legacy `combineReducers`) - -**Store Setup**: `src/app/store.ts` - -```typescript -import { combineSlices, configureStore } from "@reduxjs/toolkit"; -import { listenerMiddleware } from "./middleware"; - -// Feature slices -import { chatSlice } from "../features/Chat/Thread/reducer"; -import { historySlice } from "../features/History/historySlice"; -import { configSlice } from "../features/Config/configSlice"; -import { pagesSlice } from "../features/Pages/pagesSlice"; -// ... 20+ more slices - -// RTK Query APIs -import { capsApi } from "../services/refact/caps"; -import { commandsApi } from "../services/refact/commands"; -// ... 15+ more APIs - -const rootReducer = combineSlices( - chatSlice, - historySlice, - configSlice, - // Auto-registers RTK Query reducers - capsApi, - commandsApi, - // ... -); - -export const store = configureStore({ - reducer: rootReducer, - middleware: (getDefaultMiddleware) => - getDefaultMiddleware() - .prepend(listenerMiddleware.middleware) - .concat(capsApi.middleware, commandsApi.middleware /* ... 
*/), -}); -``` - -### Key Slices - -| Slice | Purpose | Location | State Keys | -| ---------------- | ---------------------- | ------------------------------------------------ | ----------------------------------------------------------------------------------------- | -| **chat** | Active thread + cache | `features/Chat/Thread/reducer.ts` | `thread`, `streaming`, `waiting_for_response`, `prevent_send`, `cache`, `queued_messages` | -| **history** | Chat history (max 100) | `features/History/historySlice.ts` | `chats`, `selectedId` | -| **config** | Global settings | `features/Config/configSlice.ts` | `host`, `lspPort`, `apiKey`, `features`, `themeProps` | -| **pages** | Navigation stack | `features/Pages/pagesSlice.ts` | `pages` (array of page objects) | -| **activeFile** | IDE context | `features/Chat/activeFile.ts` | `file_name`, `can_paste`, `cursor` | -| **checkpoints** | Rollback UI state | `features/Checkpoints/checkpointsSlice.ts` | `previewData`, `restoreInProgress` | -| **confirmation** | Tool pause reasons | `features/ToolConfirmation/confirmationSlice.ts` | `pauseReasons`, `wasInteracted`, `confirmationStatus` | -| **errors** | Error messages | `features/Errors/errorsSlice.ts` | `errors` (array) | -| **teams** | Active team/group | `features/Teams/teamsSlice.ts` | `activeGroup` | - -### RTK Query APIs - -**All APIs** auto-generate hooks like `useGetCapsQuery`, `useUpdateModelMutation` - -| API | Base URL | Purpose | Key Endpoints | -| ------------------- | --------------------------- | ------------------ | ------------------------------------ | -| **capsApi** | `/v1/caps` | Model capabilities | `getCaps` | -| **commandsApi** | `/v1/at-command-completion` | Autocomplete | `getCompletion`, `getPreview` | -| **toolsApi** | `/v1/tools` | Tool system | `getTools`, `checkForConfirmation` | -| **dockerApi** | `/v1/docker-*` | Container mgmt | `getContainers`, `executeAction` | -| **integrationsApi** | `/v1/integrations` | Config files | `getData`, `saveData` | -| **modelsApi** | `/v1/customization` | Model config | `getModels`, `updateModel` | -| **providersApi** | `/v1/customization` | Provider config | `getProviders`, `updateProvider` | -| **checkpointsApi** | `/v1/*_checkpoints` | Workspace rollback | `preview`, `restore` | -| **pathApi** | `/v1/*_path` | File paths | `getFullPath`, `customizationPath` | -| **telemetryApi** | `/v1/telemetry` | Analytics | `sendChatEvent`, `sendNetEvent` | -| **linksApi** | `/v1/links` | Smart links | `getLinks` | -| **smallCloudApi** | `https://www.smallcloud.ai` | Auth/user | `getUser`, `getUserSurvey` (GraphQL) | - -**Note**: Chat is NOT an RTK Query API - uses manual `fetch` with custom streaming logic. 
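The sequence-validation rule in the Chat Flow section above (every event carries a monotonically increasing `seq`, `snapshot` resets the counter, and any gap forces a reconnect for a fresh snapshot) fits in a small tracker. A sketch under assumed names; `ChatEvent` and the `onGap` callback are illustrative rather than the actual subscription code:

```typescript
// Minimal seq tracker for the chat event stream described above.
// Event type names follow the documented protocol; the rest is illustrative.
interface ChatEvent {
  type: string; // "snapshot", "stream_delta", "message_added", ...
  seq: number;
}

export function createSeqTracker(onGap: () => void) {
  let lastSeq: number | null = null;

  return function accept(event: ChatEvent): boolean {
    if (event.type === "snapshot") {
      lastSeq = event.seq; // snapshot resets the counter
      return true;
    }
    if (lastSeq !== null && event.seq === lastSeq + 1) {
      lastSeq = event.seq;
      return true;
    }
    // Gap, or an event arriving before any snapshot: drop local state and
    // resubscribe so the server sends a fresh snapshot.
    onGap();
    return false;
  };
}
```

Applying events only when `accept` returns `true` keeps the local thread state in lockstep with the server.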
- -### Selectors Pattern - -**Always use selectors** (don't access `state.chat.thread.messages` directly) - -```typescript -// src/features/Chat/Thread/selectors.ts - -export const selectThread = (state: RootState) => state.chat.thread; -export const selectMessages = (state: RootState) => state.chat.thread.messages; -export const selectIsStreaming = (state: RootState) => state.chat.streaming; -export const selectChatId = (state: RootState) => state.chat.thread.id; - -// Memoized selectors with Reselect -export const selectLastAssistantMessage = createSelector( - [selectMessages], - (messages) => { - for (let i = messages.length - 1; i >= 0; i--) { - if (isAssistantMessage(messages[i])) return messages[i]; - } - return null; - }, -); - -// Complex selectors -export const selectHasUncalledTools = createSelector( - [selectMessages], - (messages) => { - const lastMsg = messages[messages.length - 1]; - if (!isAssistantMessage(lastMsg)) return false; - if (!lastMsg.tool_calls) return false; - return lastMsg.tool_calls.some((tc) => !isServerExecutedTool(tc.id)); - }, -); -``` - -**30+ selectors** in `selectors.ts` - use them for consistency! - -### Redux Persist - -**Location**: `src/app/storage.ts` - -```typescript -import { persistReducer } from "redux-persist"; -import storage from "redux-persist/lib/storage"; // localStorage - -const persistConfig = { - key: "refact-chat", - storage, - whitelist: ["history", "config"], // Only persist these slices - transforms: [pruneHistoryTransform], // Limit to 100 chats -}; - -// Prune old chats on save -const pruneHistoryTransform = createTransform( - (inboundState: HistoryState) => { - if (inboundState.chats.length <= 100) return inboundState; - return { - ...inboundState, - chats: inboundState.chats.slice(-100), // Keep last 100 - }; - }, - null, - { whitelist: ["history"] }, -); -``` - -**Why only history + config?** - -- Active chat (`state.chat`) is ephemeral -- Cache is cleared on app restart -- Prevents localStorage quota issues - -### Middleware & Listeners - -**Location**: `src/app/middleware.ts` - -**Purpose**: Cross-cutting concerns that don't fit in reducers - -```typescript -export const listenerMiddleware = createListenerMiddleware() - -// 1. Error handling for RTK Query -listenerMiddleware.startListening({ - matcher: isAnyOf( - capsApi.endpoints.getCaps.matchRejected, - // ... other rejected matchers - ), - effect: (action, listenerApi) => { - listenerApi.dispatch(addError({ - message: action.error.message, - type: 'GLOBAL' - })) - } -}) - -// 2. IDE tool response handling -listenerMiddleware.startListening({ - actionCreator: ideToolCallResponse, - effect: (action, listenerApi) => { - const { toolCallId, chatId, accepted } = action.payload - - // Update history - listenerApi.dispatch(upsertToolCallIntoHistory({...})) - - // Update active thread - listenerApi.dispatch(upsertToolCall({...})) - - // Remove pause reason for this tool - listenerApi.dispatch(updateConfirmationAfterIdeToolUse({...})) - - // Continue chat if no more pause reasons - const state = listenerApi.getState() - if (state.confirmation.pauseReasons.length === 0 && accepted) { - listenerApi.dispatch(sendCurrentChatToLspAfterToolCallUpdate({ - chatId, toolCallId - })) - } - } -}) - -// 3. 
Theme class updates -listenerMiddleware.startListening({ - predicate: (action, currentState, previousState) => { - return currentState.config.themeProps?.appearance !== - previousState.config.themeProps?.appearance - }, - effect: (action, listenerApi) => { - const appearance = listenerApi.getState().config.themeProps?.appearance - document.body.className = appearance === 'light' ? 'vscode-light' : 'vscode-dark' - } -}) - -// 10+ more listeners for: -// - Telemetry events -// - History auto-save -// - File reload triggers -// - JetBrains-specific tree refresh -``` - -**Key Pattern**: Use listeners for: - -- Side effects (postMessage, telemetry) -- Cross-slice coordination -- Reacting to RTK Query lifecycle - ---- - -## Component Hierarchy & Rendering - -### Visual Component Tree - -``` -App (features/App.tsx) -├─ Provider Stack -│ ├─ Redux Provider -│ ├─ urql Provider (GraphQL) -│ ├─ PersistGate (redux-persist) -│ ├─ Theme (Radix) -│ ├─ TourProvider -│ └─ AbortControllerProvider -│ -└─ InnerApp - ├─ Sidebar (navigation) - ├─ Toolbar (tabs if tabbed mode) - │ - └─ PageWrapper (current page) - ├─ Chat (main chat page) ⭐ - │ ├─ ChatHistory - │ ├─ ChatContent ⭐⭐ (message renderer) - │ │ ├─ UserInput (editable messages) - │ │ ├─ AssistantInput (AI responses) - │ │ │ ├─ ReasoningContent (thinking blocks) - │ │ │ ├─ Markdown (main content) - │ │ │ ├─ ToolsContent ⭐⭐⭐ (most complex) - │ │ │ └─ Citations (web search links) - │ │ ├─ DiffContent (file changes) - │ │ ├─ QueuedMessage (pending sends) - │ │ └─ SystemInput (system messages) - │ │ - │ └─ ChatForm (input + controls) - │ ├─ TextArea - │ ├─ PromptSelect - │ ├─ ToolConfirmation (pause popup) - │ ├─ FilesPreview - │ └─ AgentCapabilities - │ - ├─ ThreadHistory (view old thread) - ├─ Statistics (usage charts) - ├─ Integrations (config UI) - ├─ Providers (LLM config) - └─ FIMDebug (debug panel) -``` - -### Critical Component: ChatContent - -**Location**: `src/components/ChatContent/ChatContent.tsx` (283 lines) - -**Purpose**: Dispatcher that routes message types to specialized renderers - -**Core Algorithm**: - -```typescript -function renderMessages( - messages: ChatMessages, - onRetry: (index, question) => void, - waiting: boolean, - memo: React.ReactNode[] = [], - index = 0 -): React.ReactNode[] { - if (messages.length === 0) return memo - - const [head, ...tail] = messages - - // Route by message type - if (head.role === 'tool') { - return renderMessages(tail, onRetry, waiting, memo, index + 1) // Skip tools - } - - if (head.role === 'user') { - return renderMessages(tail, onRetry, waiting, - memo.concat(), - index + 1 - ) - } - - if (head.role === 'assistant') { - // Group consecutive diffs + tools with this assistant message - const [diffMessages, toolMessages, rest] = groupRelatedMessages(tail) - - return renderMessages(rest, onRetry, waiting, - memo.concat( - - ), - index + diffMessages.length + toolMessages.length + 1 - ) - } - - // ... 
handle other types - return renderMessages(tail, onRetry, waiting, memo, index + 1) -} -``` - -**Key Behavior**: - -- **Recursive** processing (not `map`) -- **Groups** diffs + tools with assistant messages -- **Skips** tool messages (shown inline in AssistantInput) -- **Appends** memo (pure functional, no mutations) - -### UserInput Component - -**Props**: - -```typescript -interface UserInputProps { - message: UserMessage; - index: number; - onRetry?: (index: number, content: string) => void; -} -``` - -**Features**: - -- **Editable** via inline textarea (click to edit) -- **Checkpoints** badge (if message has checkpoints) -- **Image attachments** (multi-modal content parsing) -- **Compression hint** 🗜️ icon -- **Context files** 🗃️ icon (memories) - -**Content Types**: - -```typescript -type UserMessage = { - role: "user"; - content: string | UserMessageContent[]; // String or multi-modal - checkpoints?: Checkpoint[]; - compression_strength?: "absent" | "weak" | "strong"; -}; - -type UserMessageContent = - | { type: "text"; text: string } - | { type: "image_url"; image_url: { url: string } }; -``` - -### AssistantInput Component +**Store**: `src/app/store.ts` — `combineSlices` with 12+ slices + 20+ RTK Query APIs -**Props**: +### Key State (per-thread) ```typescript -interface AssistantInputProps { - message: AssistantMessage; - diffMessages: DiffMessage[]; - toolMessages: ToolMessage[]; - waiting: boolean; - onRetry?: () => void; +state.chat.threads[id]: ChatThreadRuntime = { + thread: ChatThread, // id, messages, model, title, tool_use, boost_reasoning, reasoning_effort, temperature, mode, is_task_chat, task_meta + streaming: boolean, + waiting_for_response: boolean, + prevent_send: boolean, + error: string | null, + queued_items: QueuedItem[], + attached_images: ImageFile[], + confirmation: ThreadConfirmation, // pause, pause_reasons, status + snapshot_received: boolean, } ``` -**Rendering Order**: - -1. **ReasoningContent** (thinking blocks) - collapsible -2. **Main content** (Markdown) - with syntax highlighting -3. **ToolsContent** (for each tool_call) - complex nested tree -4. **DiffContent** (grouped diffs) - apply/reject UI -5. **Citations** (web search results) - clickable links -6. **Like/Resend buttons** (bottom actions) -7. **Usage info** (tokens, cost) - footer - -**Streaming Behavior**: - -- Shows streaming indicator while `waiting || content.endsWith('▍')` -- Markdown renders incrementally (no flicker) -- Tool calls appear as they arrive - -### ToolsContent Component ⭐ - -**Location**: `src/components/ChatContent/ToolsContent.tsx` (668 lines!) 
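Given the per-thread `ChatThreadRuntime` shape shown above, derived facts such as "can this thread accept input" are best expressed as memoized selectors rather than ad-hoc field reads. A sketch with `createSelector`, assuming the field names from that shape; `RootStateSketch` and the selector names are illustrative:

```typescript
import { createSelector } from "@reduxjs/toolkit";

// Field names mirror the ChatThreadRuntime shape above; the RootStateSketch
// type and selector names are illustrative, not the project's real ones.
interface RootStateSketch {
  chat: {
    threads: Record<
      string,
      {
        streaming: boolean;
        waiting_for_response: boolean;
        prevent_send: boolean;
        error: string | null;
        snapshot_received: boolean;
        confirmation: { pause: boolean };
      }
    >;
  };
}

const selectThreadRuntime = (state: RootStateSketch, chatId: string) =>
  state.chat.threads[chatId];

// "Can this thread accept a new user message?" collects the send invariants
// in one memoized place instead of scattering field reads across components.
export const selectCanSend = createSelector([selectThreadRuntime], (runtime) =>
  Boolean(
    runtime &&
      runtime.snapshot_received &&
      !runtime.streaming &&
      !runtime.waiting_for_response &&
      !runtime.prevent_send &&
      !runtime.error &&
      !runtime.confirmation.pause,
  ),
);
```

A selector like this could back both the send control and any auto-send logic; memoization keeps re-renders cheap while a thread is streaming.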
- -**Why so complex?** - -- Handles 10+ tool types -- Nested subchats (5 levels deep possible) -- Multi-modal results (text, images, files) -- Special cases: Knowledge, TextDoc browser - -**Visual Structure**: - -``` -ToolsContent (one per tool_call) -├─ Header (tool name, status badge) -├─ Arguments (collapsible JSON) -│ -└─ Result (polymorphic by tool type) - ├─ TextResult (most tools) - ├─ KnowledgeResults (search results with scores) - │ └─ FileList (clickable files) - ├─ TextDocContent (file browser) - │ ├─ FileTree navigation - │ ├─ File content viewer - │ └─ SmartLinks (context actions) - └─ MultiModalResult (images + text) - └─ DialogImage (lightbox) -``` - -**Tool Status Badge**: - -- ⏳ `thinking` - Tool executing -- ✅ `success` - Completed -- ❌ `error` - Failed -- ☁️ `server` - Server-executed tool (display only) +**Navigation**: `current_thread_id`, `open_thread_ids` (tabs), `threads` map -**Special Tool Types**: - -| Tool Type | Component | Notes | -| -------------------- | ------------------- | ------------------------------------------ | -| `knowledge` | KnowledgeResults | Shows search results with relevance scores | -| `textdoc` | TextDocContent | Interactive file browser with navigation | -| `subchat_*` | Nested ToolsContent | Recursive subchat rendering (max 5 deep) | -| `patch`, `text_edit` | DiffContent | Shows in DiffContent, not ToolsContent | -| Server tools | Badge only | `srvtoolu_*` prefix, no execution UI | +### Redux Persist -### DiffContent Component +Whitelist: `["tour", "userSurvey"]` (NOT chat/history — those are ephemeral) -**Location**: `src/components/ChatContent/DiffContent.tsx` (364 lines) +### Key Selectors (features/Chat/Thread/selectors.ts, ~40+) -**Purpose**: Group and display file changes with apply/reject controls +Always use selectors. Never access `state.chat.threads[id]` directly in components. -**Grouping Logic**: +### RTK Query APIs -```typescript -// Groups consecutive diffs by tool_call_id -const groupedDiffs = diffMessages.reduce((acc, msg) => { - const key = msg.tool_call_id || "ungrouped"; - if (!acc[key]) acc[key] = []; - acc[key].push(msg); - return acc; -}, {}); -``` +All generate hooks (`useGetCapsQuery`, etc.). Dynamic base URL from Redux state. Auto-injects auth. -**Each Group Renders**: +| API | Key Endpoints | +| ------------------------------- | ---------------------------------------------------------------------- | +| capsApi | `/v1/caps` | +| commandsApi | `/v1/at-command-completion`, `/v1/at-command-preview` | +| toolsApi | `/v1/tools`, `/v1/tools/check_confirmation` | +| dockerApi | `/v1/docker-container-list`, `/v1/docker-container-action` | +| integrationsApi | `/v1/integrations-list`, `/v1/integration-get`, `/v1/integration-save` | +| modelsApi, providersApi | `/v1/customization` | +| checkpointsApi | `/v1/preview_checkpoints`, `/v1/restore_checkpoints` | +| telemetryApi | `/v1/telemetry/chat` | +| linksApi | `/v1/links` | +| trajectoriesApi, trajectoryApi | `/v1/trajectories/*` | +| tasksApi | Tasks CRUD | +| chatModesApi, customizationApi | Agent modes/customization | +| knowledgeApi, knowledgeGraphApi | Knowledge/memory | +| smallCloudApi | `https://www.smallcloud.ai` (GraphQL) | -- **Header**: Tool name, file count, timestamps -- **Diff Viewer**: Line-by-line changes with syntax highlighting -- **Actions**: Apply All, Reject All (per group) -- **IDE Link**: Clickable file paths (opens in IDE) +Chat uses **Commands API** + **SSE subscription**, not RTK Query. 
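The shared behavior of these APIs (base URL derived from Redux state, auth header injected automatically) is the kind of thing a custom `baseQuery` wrapper around `fetchBaseQuery` handles. A sketch assuming the config slice exposes `lspPort` and `apiKey`, as the older text in this file suggests; the project's actual shared helper may differ:

```typescript
import {
  createApi,
  fetchBaseQuery,
  type BaseQueryFn,
  type FetchArgs,
  type FetchBaseQueryError,
} from "@reduxjs/toolkit/query/react";

// Assumed shape of the config slice; field names follow the rest of this doc.
interface ConfigStateSketch {
  config: { lspPort: number; apiKey?: string };
}

// Rebuild fetchBaseQuery per request so the port and key always come from current state.
const dynamicBaseQuery: BaseQueryFn<string | FetchArgs, unknown, FetchBaseQueryError> =
  async (args, api, extraOptions) => {
    const { config } = api.getState() as ConfigStateSketch;
    const rawBaseQuery = fetchBaseQuery({
      baseUrl: `http://127.0.0.1:${config.lspPort}`,
      prepareHeaders: (headers) => {
        if (config.apiKey) headers.set("Authorization", `Bearer ${config.apiKey}`);
        return headers;
      },
    });
    return rawBaseQuery(args, api, extraOptions);
  };

// Example consumer: the caps endpoint from the table above.
export const capsApiSketch = createApi({
  reducerPath: "capsSketch",
  baseQuery: dynamicBaseQuery,
  endpoints: (builder) => ({
    getCaps: builder.query<unknown, void>({ query: () => "/v1/caps" }),
  }),
});

export const { useGetCapsQuery } = capsApiSketch;
```

Rebuilding `fetchBaseQuery` on each request is slightly wasteful but keeps the port and key reads in one place; a cached variant works equally well.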
-**Diff Format**: +## Key Hooks -```typescript -type DiffChunk = { - file_name: string; - file_action: "A" | "M" | "D"; // Added/Modified/Deleted - line1: number; - line2: number; - chunks: string; // Unified diff format -}; -``` +| Hook | Purpose | +| -------------------------------- | ---------------------------------------------------------------------------------------- | +| `useChatActions` | submit, abort, regenerate, respondToToolConfirmation | +| `useChatSubscription` | Single chat SSE connection | +| `useAllChatsSubscription` | Multi-tab SSE manager | +| `useEnsureSubscriptionConnected` | Wait for snapshot before actions | +| `useEventBusForApp` | IDE → GUI events (file context, new chat, tool approval) | +| `useEventBusForIDE` | GUI → IDE events (open file, paste, tool call) | +| `usePostMessage` | Transport: VSCode `acquireVsCodeApi`, JetBrains `postIntellijMessage`, web `postMessage` | +| `useCheckpoints` | Checkpoint preview/restore | +| `useActiveTeamsGroup` | Teams group management | -### Message Type Routing Summary +## Components -| Role | Component | Skip Render? | Group With? | -| -------------- | -------------------------- | ------------ | ------------- | -| `user` | UserInput | No | - | -| `assistant` | AssistantInput | No | diffs + tools | -| `tool` | (inline in AssistantInput) | Yes | - | -| `diff` | DiffContent | No (grouped) | assistant | -| `context_file` | ContextFiles | No | - | -| `system` | SystemInput | No | - | -| `plain_text` | PlainText | No | - | +### ChatContent (src/components/ChatContent/ChatContent.tsx) -### Special Content Markers +Dispatches messages to specialized renderers. Iterative processing (not recursive). Groups assistant messages with related diffs + tools. -**In UI, look for these icons**: +| Role | Component | Notes | +| -------------- | -------------------------- | -------------------------------------------------------------------- | +| `user` | UserInput | Editable, checkpoints badge, images, compression hint 🗜️ | +| `assistant` | AssistantInput | ReasoningContent → Markdown → ToolsContent → DiffContent → Citations | +| `tool` | (inline in AssistantInput) | Skipped in top-level render | +| `diff` | DiffContent | Grouped by tool_call_id, apply/reject UI | +| `context_file` | ContextFiles | Memory/knowledge attachments 🗃️ | -| Icon | Meaning | Location | -| ---- | ------------------------------------ | ---------------- | -| 🗜️ | Compression hint (context too large) | UserInput | -| 🗃️ | Memory/context files attached | UserInput | -| ⏳ | Tool thinking | ToolsContent | -| ✅ | Tool success | ToolsContent | -| ❌ | Tool failed | ToolsContent | -| ☁️ | Server-executed tool | ToolsContent | -| 🔄 | Checkpoint reset available | CheckpointButton | +### ToolsContent (src/components/ChatContent/ToolsContent.tsx) ---- +Largest component (~1180 lines). Handles 10+ tool types including nested subchats (max 5 deep), knowledge results, file browser, multi-modal results. OpenAI-specific tool components: AudioTool, ComputerCallTool, CodeInterpreterCallTool, FileSearchCallTool. -## UI & Styling +**Tool status**: ⏳ thinking · ✅ success · ❌ error · ☁️ server (`srvtoolu_*` prefix) -### Styling Architecture +### Tool Confirmation -**Two-layer system**: **Radix UI Themes** + **CSS Modules** +`pause_required` event → ToolConfirmation popup → Allow Once / Allow Chat / Stop. 
-``` -Radix Themes (design tokens) - ↓ provides -CSS Variables (--space-*, --color-*, --radius-*) - ↓ used by -CSS Modules (component-specific styles) -``` +Auto-approve for patch-like tools when `automatic_patch === true`: `patch`, `text_edit`, `create_textdoc`, `update_textdoc`, `replace_textdoc`, `update_textdoc_regex`, `update_textdoc_by_lines`. -### Golden Rules +## Styling -1. ✅ **Use Radix primitives for layout**: `Flex`, `Box`, `Text`, `Card`, `Button` -2. ✅ **Use design tokens** (not magic numbers): `var(--space-3)`, `var(--color-accent-9)` -3. ✅ **CSS Modules** for component-specific styles: `styles.chatContent` -4. ❌ **Avoid global CSS** (exception: `src/lib/render/web.css` for body baseline) -5. ❌ **No inline styles** (use CSS Modules or Radix props) -6. ❌ **No magic numbers** (`padding: 8px` → `padding: var(--space-2)`) +**Radix Themes** (design tokens) + **CSS Modules** (component-specific). -### Radix Design Tokens +**Rules**: Use Radix primitives (`Flex`, `Box`, `Text`, `Card`, `Button`). Use design tokens (`var(--space-3)`, `var(--accent-9)`). CSS Modules for custom styles. No inline styles, no magic numbers, no hardcoded colors, no global CSS. -**Spacing** (based on 4px grid): +## IDE Integration (postMessage) -```css ---space-1: 4px --space-2: 8px --space-3: 12px --space-4: 16px --space-5: 20px - --space-6: 24px --space-7: 28px --space-8: 32px --space-9: 36px; -``` +**Host modes**: `web` | `vscode` | `jetbrains` | `ide` -**Colors** (semantic tokens): +**IDE → GUI**: `updateConfig`, `setFileInfo`, `setSelectedSnippet`, `newChatAction`, `ideToolCallResponse` +**GUI → IDE**: `ideOpenFile`, `ideDiffPasteBack`, `ideToolCall`, `ideNewFile`, `ideAnimateFileStart/Stop` -```css ---accent-1 through --accent-12 /* Primary brand color scale */ ---gray-1 through --gray-12 /* Neutral grays */ ---color-background /* Page background */ ---color-surface /* Card background */ ---color-panel-solid /* Overlay background */ -``` +## Multi-Tab & Background Threads -**Radii**: +Threads continue processing even without open tabs. `closeThread` preserves busy runtimes (streaming, waiting, paused). Background thread needs confirmation → auto-switches user to that tab. -```css ---radius-1: 4px --radius-2: 6px --radius-3: 8px --radius-4: 12px --radius-full: - 9999px; -``` +**Two SSE systems**: Chat subscription (per-thread, real-time state) + Trajectories subscription (global, metadata sync only). -**Typography**: +### State Machine (per thread) -```css ---font-size-1 through --font-size-9 ---line-height-1 through --line-height-9 ---font-weight-regular: 400 ---font-weight-medium: 500 ---font-weight-bold: 700 ``` - -### Theme Configuration - -**Component**: `src/components/Theme/Theme.tsx` - -```typescript -interface ThemeProps { - appearance?: 'light' | 'dark' | 'inherit' - accentColor?: 'indigo' | 'blue' | 'green' | /* ... 
*/ - grayColor?: 'gray' | 'mauve' | 'slate' | 'auto' - radius?: 'none' | 'small' | 'medium' | 'large' | 'full' - scaling?: '90%' | '95%' | '100%' | '105%' | '110%' -} - -export function Theme({ children }: { children: React.ReactNode }) { - const config = useConfig() - const appearance = useAppearance() // Listens to OS/IDE theme - - return ( - - {children} - - ) -} +IDLE → [submit] → WAITING → [first chunk] → STREAMING → [finish] → IDLE + → [pause_required] → PAUSED → [confirm] → IDLE + → [error/abort] → STOPPED ``` -**Host-specific behavior**: - -- `host === 'web'`: Wrapper includes dev theme toggle -- `host === 'vscode' | 'jetbrains'`: No wrapper, IDE controls theme -- `document.body.className`: Set to `vscode-light` or `vscode-dark` by middleware - -### CSS Modules Pattern - -**File naming**: `Component.module.css` +### Send Invariants -**Example** (`ChatContent.module.css`): +Chat can proceed when ALL true: `snapshot_received && !streaming && !waiting_for_response && !prevent_send && !error && !confirmation.pause` -```css -.scroll_area { - height: 100%; - padding: var(--space-2) var(--space-4); -} +## Special Features -.message_group { - display: flex; - flex-direction: column; - gap: var(--space-3); -} +- **Checkpoints**: Workspace rollback via git commits. Preview → Restore. Per-message reset button. +- **Thinking Blocks**: `thinking_blocks: [{thinking, signature}]` on assistant messages. Collapsible UI. Signatures are opaque — never mutate. +- **Reasoning Content**: Separate `reasoning_content` field. Collapsible. +- **Knowledge/Memory**: `remember_how_to_use_tools` → vecdb → `context_file` messages. Knowledge graph view. +- **Customization**: Agent modes, subagent forms, tool parameter editor. +- **Tour/Onboarding**: Welcome screen, guided tour bubbles. +- **FIM Debug**: Fill-in-Middle debug panel with search context and symbol list. +- **CoinBalance**: Token/credit tracking with metering fields on messages. +- **Docker**: Container list, start/stop/kill/remove, env vars, smart links. +- **Compression Hints**: 🗜️ icon when context approaches limit. `compression_strength: "absent" | "weak" | "strong"`. +- **Queued Messages**: Send while streaming. Priority queue bypasses tool wait. +- **Multi-Modal**: Images in user messages and tool results. `DialogImage` lightbox. +- **Usage Tracking**: `UsageCounter` (circular progress), `StreamingTokenCounter` (live), `TokensMapContent` (breakdown). +- **Provider OAuth**: OAuth2 flow for provider authentication. +- **MCP Logs**: MCP integration logging in IntegrationsView. -.streaming_indicator { - color: var(--accent-9); - animation: pulse 1.5s ease-in-out infinite; -} +## Development Patterns -@keyframes pulse { - 0%, - 100% { - opacity: 1; - } - 50% { - opacity: 0.5; - } -} -``` +### Adding Redux Slice -**Usage in component**: +1. Create `features/MyFeature/myFeatureSlice.ts` with `createSlice` +2. Register in `combineSlices` in `store.ts` +3. Use `useAppSelector`/`useAppDispatch` in components -```typescript -import styles from './ChatContent.module.css' - -export function ChatContent() { - return ( -
- {/* ... */}
- ) -} -``` +### Adding RTK Query API -**Conditional classes**: +1. Create `services/refact/myApi.ts` with `createApi` +2. Register in `combineSlices` + add `.middleware` in store +3. Use auto-generated hooks -```typescript -import classNames from 'classnames' +### Adding Component -
-``` +`Component.tsx` + `Component.module.css` + `index.ts`. Use Radix primitives + CSS Modules + design tokens. -### Common Patterns +### File Naming -**Layout with Radix**: +Components: `PascalCase.tsx` · Hooks: `useCamelCase.ts` · Utils: `camelCase.ts` · CSS: `PascalCase.module.css` -```typescript - - Header - Content - Footer - -``` - -**Typography**: - -```typescript - - Label text - -``` - -**Cards**: - -```typescript - - - {/* content */} - - -``` - -**Buttons**: - -```typescript - -``` - -### Responsive Design - -**Minimal responsive styling** (app is designed for IDE sidebars) - -**Breakpoints** (when needed): - -```css -@media (max-width: 768px) { - .sidebar { - display: none; - } -} -``` - -**Flex-based layout** handles most responsive needs automatically. - -### Dark/Light Mode - -**How it works**: - -1. User/OS sets `appearance: 'light' | 'dark'` -2. Radix Theme applies appropriate color scales -3. All Radix tokens update automatically -4. Custom CSS uses tokens, so it updates too - -**Testing dark mode**: - -- Web: Use theme toggle in UI -- VSCode: Change VSCode theme -- JetBrains: Change IDE theme - -**Custom dark mode overrides** (rare): - -```css -.my_component { - background: var(--color-surface); -} - -/* Only if Radix token doesn't work */ -:is(.dark, .dark-theme) .my_component { - background: #1a1a1a; -} -``` - -### Icons - -**Radix Icons**: - -```typescript -import { ChevronDownIcon, CheckIcon, Cross2Icon } from '@radix-ui/react-icons' - - -``` - -**Custom icons** (rare): - -```typescript -// src/images/ -export function CustomIcon() { - return {/* ... */} -} -``` - -### Animations - -**Framer Motion** for complex animations: - -```typescript -import { motion } from 'framer-motion' - - - {content} - -``` - -**CSS animations** for simple effects: - -```css -@keyframes fadeIn { - from { - opacity: 0; - } - to { - opacity: 1; - } -} - -.fade_in { - animation: fadeIn 0.2s ease-in-out; -} -``` - -### Common Mistakes to Avoid - -❌ **Using px values directly**: - -```css -/* Bad */ -.button { - padding: 12px; -} - -/* Good */ -.button { - padding: var(--space-3); -} -``` - -❌ **Hardcoded colors**: - -```css -/* Bad */ -.text { - color: #3b82f6; -} - -/* Good */ -.text { - color: var(--accent-9); -} -``` - -❌ **Global styles without scoping**: - -```css -/* Bad - affects everything */ -button { - border-radius: 8px; -} - -/* Good - scoped to module */ -.my_button { - border-radius: var(--radius-3); -} -``` - -❌ **Ignoring Radix primitives**: - -```tsx -/* Bad - reinventing the wheel */ -
- -/* Good - use Radix */ - -``` - ---- - -## API Services - -### Service Architecture - -**Two separate backends**: - -``` -┌─────────────────────────────────────────┐ -│ Frontend (React) │ -├─────────────────────────────────────────┤ -│ RTK Query APIs │ -│ - capsApi, toolsApi, dockerApi, etc. │ -└────┬──────────────────────────┬─────────┘ - │ │ - ▼ ▼ -┌─────────────────┐ ┌─────────────────┐ -│ Local LSP │ │ SmallCloud.ai │ -│ 127.0.0.1:8001 │ │ (cloud) │ -│ │ │ │ -│ - Chat │ │ - Auth │ -│ - Tools │ │ - User mgmt │ -│ - Caps │ │ - Teams │ -│ - Models │ │ - Surveys │ -│ - Docker │ │ │ -│ - Integrations │ │ (GraphQL) │ -└─────────────────┘ └─────────────────┘ -``` - -**Critical distinction**: - -- **Chat ALWAYS goes to LSP** (never SmallCloud) -- LSP handles all AI operations -- SmallCloud only for auth/user/team management - -### LSP Server Endpoints - -**Base URL**: `http://127.0.0.1:${lspPort}/v1/...` - -| Endpoint | Method | Purpose | RTK Query API | -| ------------------------------ | ------ | --------------------- | ------------------------------- | -| `/v1/chat` | POST | **Streaming chat** | ❌ Manual fetch | -| `/v1/caps` | GET | Model capabilities | `capsApi.getCaps` | -| `/v1/at-command-completion` | POST | Autocomplete | `commandsApi.getCompletion` | -| `/v1/at-command-preview` | POST | Preview command | `commandsApi.getPreview` | -| `/v1/tools` | POST | Get available tools | `toolsApi.getTools` | -| `/v1/tools/check_confirmation` | POST | Check tool approval | `toolsApi.checkForConfirmation` | -| `/v1/docker-container-list` | POST | List containers | `dockerApi.getContainers` | -| `/v1/docker-container-action` | POST | Execute action | `dockerApi.executeAction` | -| `/v1/integrations-list` | GET | List integrations | `integrationsApi.getList` | -| `/v1/integration-get` | POST | Get config | `integrationsApi.getData` | -| `/v1/integration-save` | POST | Save config | `integrationsApi.saveData` | -| `/v1/preview_checkpoints` | POST | Preview rollback | `checkpointsApi.preview` | -| `/v1/restore_checkpoints` | POST | Apply rollback | `checkpointsApi.restore` | -| `/v1/get_file_text` | POST | Read file | `pathApi.getFileText` | -| `/v1/*_path` | GET | Get config paths | `pathApi.*Path` | -| `/v1/customization` | POST | Model/provider config | `modelsApi`, `providersApi` | -| `/v1/telemetry/chat` | POST | Send telemetry | `telemetryApi.sendChatEvent` | -| `/v1/ping` | GET | Health check | `pingApi.getPing` | - -### RTK Query API Pattern - -**All APIs follow this structure**: - -```typescript -// src/services/refact/caps.ts -import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; - -export const capsApi = createApi({ - reducerPath: "caps", - baseQuery: fetchBaseQuery({ - baseUrl: (_, api) => { - const state = api.getState() as RootState; - return `http://127.0.0.1:${state.config.lspPort}`; - }, - prepareHeaders: (headers, { getState }) => { - const state = getState() as RootState; - if (state.config.apiKey) { - headers.set("Authorization", `Bearer ${state.config.apiKey}`); - } - return headers; - }, - }), - endpoints: (builder) => ({ - getCaps: builder.query({ - query: () => "/v1/caps", - }), - }), -}); - -export const { useGetCapsQuery, useLazyGetCapsQuery } = capsApi; -``` - -**Key features**: - -- **Dynamic base URL** from Redux state -- **Auto-injects auth** token if present -- **Auto-generates hooks**: `useGetCapsQuery`, `useLazyGetCapsQuery` -- **Caching** by default - -### Chat API (Special Case) - -**Why not RTK Query?** Streaming + custom chunking logic - 
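In the command/event architecture described earlier, the request side of this flow is a plain JSON POST and the streamed reply arrives on the SSE subscription rather than in the HTTP response. A minimal sketch of the `user_message` command; the URL and the `{type: "user_message", content}` shape come from the Chat Flow section, everything else is illustrative:

```typescript
// Hedged sketch: enqueue a user message on the command endpoint documented above.
// The server responds over the SSE subscription (queue_updated, stream_* events),
// so this function only confirms that the command was accepted.
export async function postUserMessage(
  lspUrl: string, // e.g. "http://127.0.0.1:8001"
  chatId: string,
  content: string,
  apiKey?: string,
): Promise<void> {
  const response = await fetch(`${lspUrl}/v1/chats/${chatId}/commands`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
    },
    body: JSON.stringify({ type: "user_message", content }),
  });
  if (!response.ok) {
    throw new Error(`user_message command failed: ${response.status}`);
  }
}
```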
-**Location**: `src/services/refact/chat.ts` - -```typescript -export async function sendChat({ - messages, - model, - stream: true, - abortSignal, - chatId, - port = 8001, - apiKey, - mode, - // ... -}: SendChatArgs): Promise { - const body = JSON.stringify({ - messages, - model, - stream: true, - meta: { - chat_id: chatId, - chat_mode: mode ?? 'EXPLORE', - // ... - } - }) - - const headers = { - 'Content-Type': 'application/json', - ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}) - } - - const url = `http://127.0.0.1:${port}/v1/chat` - - return fetch(url, { - method: 'POST', - headers, - body, - signal: abortSignal, - credentials: 'same-origin' - }) -} -``` - -**Response format** (SSE): - -``` -data: {"choices":[{"delta":{"role":"assistant","content":"Hi"},...}]}\n\n -data: {"choices":[{"delta":{"content":" there"},...}]}\n\n -data: [DONE]\n\n -``` - -### SmallCloud API (GraphQL) - -**Base URL**: `https://www.smallcloud.ai/v1/graphql` - -**Used for**: - -- User authentication (OAuth) -- User profile -- Team management -- Usage surveys - -**Setup**: `urqlProvider.tsx` - -```typescript -const client = createClient({ - url: "https://www.smallcloud.ai/v1/graphql", - fetchOptions: () => { - const apiKey = store.getState().config.apiKey; - return { - headers: { - ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}), - }, - }; - }, - exchanges: [cacheExchange, fetchExchange, subscriptionExchange], -}); -``` - -**Example queries** (generated from GraphQL schema): - -```typescript -// useGetUser hook -const [result] = useQuery({ - query: graphql(` - query GetUser { - user { - account - email - has_valid_subscription - } - } - `), -}); -``` - -**Note**: GraphQL codegen runs via `npm run generate:graphql` - -### Type Definitions - -**All API types** in `src/services/refact/types.ts` (787 lines!) 
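The streaming counterpart of the old format above is the subscription endpoint `GET /v1/chats/subscribe?chat_id={id}`. A sketch using the browser `EventSource`; the production client (`useChatSubscription`) may use fetch streams and dispatch real Redux actions, and treating each `data:` frame as one JSON event with a `type` field is an assumption, not something the repo confirms:

```typescript
// Sketch only: subscribe to a chat's event stream and route events by type.
// Endpoint and event names come from the Chat Flow / SSE Event Types sections.
interface ChatStreamEvent {
  type: string; // "snapshot" | "stream_delta" | "stream_finished" | ...
  seq: number;
  [key: string]: unknown;
}

export function subscribeToChat(
  lspUrl: string, // e.g. "http://127.0.0.1:8001"
  chatId: string,
  onEvent: (event: ChatStreamEvent) => void,
): () => void {
  const source = new EventSource(
    `${lspUrl}/v1/chats/subscribe?chat_id=${encodeURIComponent(chatId)}`,
  );

  source.onmessage = (message: MessageEvent) => {
    try {
      onEvent(JSON.parse(message.data as string) as ChatStreamEvent);
    } catch {
      // Ignore malformed frames; a seq gap will trigger a resubscribe anyway.
    }
  };

  source.onerror = () => {
    // EventSource retries automatically; the fresh snapshot re-syncs local state.
  };

  return () => source.close();
}
```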
- -**Key types**: - -```typescript -// Message types -export type UserMessage = { - role: 'user' - content: string | UserMessageContent[] - checkpoints?: Checkpoint[] - compression_strength?: 'absent' | 'weak' | 'strong' -} - -export type AssistantMessage = { - role: 'assistant' - content: string - reasoning_content?: string - tool_calls?: ToolCall[] - thinking_blocks?: ThinkingBlock[] - citations?: WebSearchCitation[] - finish_reason?: 'stop' | 'length' | 'tool_calls' | null - usage?: Usage - // Metering fields - metering_balance?: number - metering_*_tokens_n?: number - metering_coins_*?: number -} - -export type ToolCall = { - id: string - index: number - function: { - name: string - arguments: string // JSON string - } - subchat?: string // Subchat ID if nested - attached_files?: string[] // Files attached to subchat -} - -export type ToolMessage = { - role: 'tool' - content: ToolResult -} - -export type ToolResult = { - tool_call_id: string - content: string | { type: 'image_url', image_url: { url: string } }[] - finish_reason?: 'stop' | 'length' | null - compression_strength?: 'absent' | 'weak' | 'strong' - tool_failed?: boolean -} - -// Diff types -export type DiffMessage = { - role: 'diff' - content: DiffChunk[] - tool_call_id?: string -} - -export type DiffChunk = { - file_name: string - file_action: 'A' | 'M' | 'D' - line1: number - line2: number - chunks: string // Unified diff -} - -// Response types (streaming deltas) -export type ChatResponse = - | ChatResponseChoice - | UserResponse - | ContextFileResponse - | ToolResponse - | DiffResponse - | SubchatResponse - | SystemResponse - | PlainTextResponse -``` - -**Type guards** (critical for message routing): - -```typescript -export function isUserMessage(msg: unknown): msg is UserMessage { - return ( - typeof msg === "object" && - msg !== null && - "role" in msg && - msg.role === "user" - ); -} - -export function isAssistantMessage(msg: unknown): msg is AssistantMessage { - return ( - typeof msg === "object" && - msg !== null && - "role" in msg && - msg.role === "assistant" - ); -} - -// ... 20+ more type guards -``` - -### Error Handling - -**RTK Query errors** are caught by middleware: - -```typescript -listenerMiddleware.startListening({ - matcher: isAnyOf( - capsApi.endpoints.getCaps.matchRejected, - toolsApi.endpoints.getTools.matchRejected, - // ... - ), - effect: (action, listenerApi) => { - const error = action.error; - listenerApi.dispatch( - addError({ - message: error.message ?? "Unknown error", - type: "GLOBAL", - }), - ); - }, -}); -``` - -**Chat errors** handled in thunk: - -```typescript -.catch((err: unknown) => { - dispatch(doneStreaming({ id: chatId })) - dispatch(chatError({ - id: chatId, - message: err instanceof Error ? err.message : String(err) - })) -}) -``` - ---- - -## IDE Integration - -### postMessage Architecture - -**Communication protocol** between GUI (iframe) and IDE extension (host) - -``` -┌─────────────────────────────────────────┐ -│ IDE Extension (VSCode/JetBrains) │ -│ │ -│ window.postMessage(event, '*') │ -└──────────────┬──────────────────────────┘ - │ - │ postMessage API - │ -┌──────────────▼──────────────────────────┐ -│ GUI (React in iframe/webview) │ -│ │ -│ window.addEventListener('message', ...) │ -└─────────────────────────────────────────┘ -``` - -### Message Flow Directions - -**1. 
IDE → GUI** (context updates, responses) - -Handled by: `src/hooks/useEventBusForApp.ts` - -```typescript -export function useEventBusForApp() { - const dispatch = useAppDispatch(); - - useEffect(() => { - const listener = (event: MessageEvent) => { - // File context update - if (setFileInfo.match(event.data)) { - dispatch(setFileInfo(event.data.payload)); - } - - // Selected code snippet - if (setSelectedSnippet.match(event.data)) { - dispatch(setSelectedSnippet(event.data.payload)); - } - - // New chat trigger - if (newChatAction.match(event.data)) { - if (!isPageInHistory({ pages }, "chat")) { - dispatch(push({ name: "chat" })); - } - dispatch(newChatAction(event.data.payload)); - } - - // Tool approval response - if (ideToolCallResponse.match(event.data)) { - dispatch(event.data); - } - - // ... more handlers - }; - - window.addEventListener("message", listener); - return () => window.removeEventListener("message", listener); - }, [dispatch]); -} -``` - -**2. GUI → IDE** (commands, requests) - -Handled by: `src/hooks/useEventBusForIDE.ts` - -```typescript -export const useEventsBusForIDE = () => { - const postMessage = usePostMessage(); - - const openFile = useCallback( - (file: OpenFilePayload) => { - const action = ideOpenFile(file); - postMessage(action); - }, - [postMessage], - ); - - const diffPasteBack = useCallback( - (content: string, chatId?: string) => { - const action = ideDiffPasteBackAction({ content, chatId }); - postMessage(action); - }, - [postMessage], - ); - - const sendToolCallToIde = useCallback( - (toolCall, edit, chatId) => { - const action = ideToolCall({ toolCall, edit, chatId }); - postMessage(action); - }, - [postMessage], - ); - - // ... 20+ command functions - - return { - openFile, - diffPasteBack, - sendToolCallToIde, - // ... 
- }; -}; -``` - -### postMessage Transport - -**Location**: `src/hooks/usePostMessage.ts` - -**Auto-detects host**: - -```typescript -export function usePostMessage() { - const config = useConfig(); - - return useCallback( - (message: unknown) => { - if (config.host === "vscode") { - // VSCode uses acquireVsCodeApi - const vscode = window.acquireVsCodeApi?.(); - vscode?.postMessage(message); - } else if (config.host === "jetbrains") { - // JetBrains uses custom function - window.postIntellijMessage?.(message); - } else { - // Web/generic: use window.postMessage - window.postMessage(message, "*"); - } - }, - [config.host], - ); -} -``` - -### Event Types - -**Defined in**: `src/events/setup.ts`, IDE action creators - -**Common events IDE → GUI**: - -| Event Type | Payload | Purpose | -| ----------------------- | -------------------------------- | -------------------- | -| `updateConfig` | `Partial` | Update global config | -| `setFileInfo` | `{file_name, can_paste}` | Active file changed | -| `setSelectedSnippet` | `{code, language}` | Code selection | -| `newChatAction` | `Partial` | Start new chat | -| `ideToolCallResponse` | `{toolCallId, chatId, accepted}` | Tool approval | -| `setCurrentProjectInfo` | `{name, path}` | Project context | - -**Common events GUI → IDE**: - -| Event Type | Payload | Purpose | -| --------------------------- | -------------------------- | ----------------------- | -| `ideOpenFile` | `{file_path, line?}` | Open file in editor | -| `ideDiffPasteBack` | `{content, chatId}` | Apply code changes | -| `ideToolCall` | `{toolCall, edit, chatId}` | Request tool execution | -| `ideOpenSettings` | - | Open settings UI | -| `ideNewFile` | `{content}` | Create new file | -| `ideAnimateFileStart/Stop` | `{file_name}` | File activity indicator | -| `ideChatPageChange` | `{page}` | Navigation event | -| `ideSetCodeCompletionModel` | `{model}` | Update model | -| `ideSetActiveTeamsGroup` | `{group}` | Set active team | - -### Host Mode Differences - -**Config**: `state.config.host: 'web' | 'vscode' | 'jetbrains' | 'ide'` - -| Feature | web | vscode | jetbrains | ide | -| ------------------------ | -------------------- | -------------------- | ----------------------- | ---------- | -| **postMessage** | `window.postMessage` | `acquireVsCodeApi()` | `postIntellijMessage()` | Generic | -| **Theme** | Toggle in UI | VSCode controls | JB controls | Generic | -| **File links** | ❌ No-op | ✅ Opens in editor | ✅ Opens in IDE | ✅ Generic | -| **Copy buttons** | ✅ Visible | ❌ Hidden | ❌ Hidden | ❌ Hidden | -| **Tool execution** | LSP only | LSP + IDE | LSP + IDE | LSP + IDE | -| **Paste to file** | ❌ No-op | ✅ Works | ✅ Works | ✅ Works | -| **Project tree refresh** | N/A | N/A | ✅ Auto-refresh | N/A | - -**Host detection**: - -```typescript -const config = useConfig(); -const isIDE = config.host !== "web"; -const isVSCode = config.host === "vscode"; -const isJetBrains = config.host === "jetbrains"; -``` - -### Tool Approval Flow (IDE-specific) - -**For patch-like tools**, IDE shows preview before applying: - -``` -1. AI suggests patch tool_call - ↓ -2. GUI: Confirmation popup (if not automatic_patch) - ↓ -3. User confirms - ↓ -4. GUI → IDE: ideToolCall({toolCall, edit, chatId}) - ↓ -5. IDE: Shows diff preview - ↓ -6. User: Applies or rejects - ↓ -7. IDE → GUI: ideToolCallResponse({toolCallId, chatId, accepted}) - ↓ -8. 
GUI middleware: Updates tool status, continues chat -``` - -**Web mode**: All tools executed by LSP directly (no IDE approval step) - ---- - -## Tool Calling System - -### Overview - -The tool calling system allows AI to execute functions (file operations, shell commands, searches, etc.) with optional user confirmation. - -### Tool Call Lifecycle - -``` -1. AI Response with tool_calls - ↓ -2. [Confirmation Gate] ← configurable - ↓ -3. Tool Execution (LSP or IDE) - ↓ -4. Tool Result inserted as message - ↓ -5. AI continues with result - ↓ -6. Loop until finish_reason: "stop" -``` - -### Confirmation Logic - -**Location**: `src/hooks/useSendChatRequest.ts` (lines 138-201) - -**Decision tree**: - -```typescript -async function sendMessages(messages, maybeMode) { - dispatch(setIsWaitingForResponse(true)); - const lastMessage = messages.slice(-1)[0]; - - // Check if last message has tool_calls - if ( - !isWaiting && - !wasInteracted && - isAssistantMessage(lastMessage) && - lastMessage.tool_calls - ) { - const toolCalls = lastMessage.tool_calls; - - // Check for automatic bypass - if ( - toolCalls[0].function.name && - PATCH_LIKE_FUNCTIONS.includes(toolCalls[0].function.name) && - isPatchAutomatic // ← per-chat setting - ) { - // Skip confirmation for patch-like tools in automatic mode - } else { - // Ask backend if confirmation needed - const confirmationResponse = await triggerCheckForConfirmation({ - tool_calls: toolCalls, - messages: messages, - }).unwrap(); - - if (confirmationResponse.pause) { - dispatch(setPauseReasons(confirmationResponse.pause_reasons)); - return; // STOP - show confirmation UI - } - } - } - - // Proceed with LSP call - dispatch(backUpMessages({ id: chatId, messages })); - dispatch(chatAskedQuestion({ id: chatId })); - // ... sendChat() -} -``` - -### PATCH_LIKE_FUNCTIONS - -**These tools auto-approve when `automatic_patch === true`**: - -```typescript -export const PATCH_LIKE_FUNCTIONS = [ - "patch", - "text_edit", - "create_textdoc", - "update_textdoc", - "replace_textdoc", - "update_textdoc_regex", - "update_textdoc_by_lines", -]; -``` - -### Confirmation API - -**Endpoint**: `POST /v1/tools/check_confirmation` - -**Request**: - -```json -{ - "tool_calls": [ - { - "id": "call_123", - "function": { - "name": "patch", - "arguments": "{\"file_path\":\"src/app.ts\",...}" - } - } - ], - "messages": [ - /* full context */ - ] -} -``` - -**Response**: - -```json -{ - "pause": true, - "pause_reasons": [ - { - "type": "confirmation", - "rule": "*.py files require approval", - "tool_call_id": "call_123" - } - ] -} -``` - -**If `pause === false`**: Tool executes immediately -**If `pause === true`**: Show ToolConfirmation popup - -### ToolConfirmation Component - -**Location**: `src/components/ChatForm/ToolConfirmation.tsx` - -**UI shows**: - -- **Tool name** (e.g., "patch") -- **Arguments** (collapsible JSON) -- **Pause reason** (e.g., "requires approval") -- **Three buttons**: - - 🟢 **Allow Once** - Confirm this tool, continue - - 🟢 **Allow Chat** - Enable automatic mode for this chat - - 🔴 **Stop** - Reject tool, end chat - -**User actions**: - -```typescript -// Allow Once -const confirmToolUsage = () => { - dispatch( - clearPauseReasonsAndHandleToolsStatus({ - wasInteracted: true, - confirmationStatus: true, - }), - ); - dispatch(setIsWaitingForResponse(false)); - // useAutoSend will detect clear and continue -}; - -// Allow Chat -const enableAutomaticPatch = () => { - dispatch(setAutomaticPatch({ chatId, value: true })); - confirmToolUsage(); -}; - -// Stop -const 
rejectToolUsage = (toolCallIds) => { - toolCallIds.forEach((id) => { - dispatch(upsertToolCall({ toolCallId: id, chatId, accepted: false })); - }); - dispatch(resetConfirmationInteractedState()); - dispatch(setIsWaitingForResponse(false)); - dispatch(doneStreaming({ id: chatId })); - dispatch(setPreventSend({ id: chatId })); -}; -``` - -### Tool Execution Paths - -**Two execution models**: - -#### 1. LSP-Executed Tools (Most tools) - -``` -GUI → LSP /v1/chat with tool_calls → LSP executes → Returns tool result -``` - -**Examples**: `shell`, `read_file`, `search`, `web_search`, etc. - -**Result format**: - -```json -{ - "role": "tool", - "tool_call_id": "call_123", - "content": "Command output...", - "finish_reason": "stop" -} -``` - -#### 2. IDE-Executed Tools (Patch-like tools) - -``` -GUI → LSP /v1/chat with tool_calls - ↓ -LSP returns tool instruction (not executed yet) - ↓ -GUI → IDE: ideToolCall({toolCall, edit, chatId}) - ↓ -IDE: Shows diff preview, user applies/rejects - ↓ -IDE → GUI: ideToolCallResponse({toolCallId, chatId, accepted}) - ↓ -GUI: Inserts tool result, continues chat -``` - -**Edit format** (`ToolEditResult`): - -```typescript -type ToolEditResult = { - file_name: string; - file_action: "A" | "M" | "D"; - line1: number; - line2: number; - chunks: string; // Unified diff -}; -``` - -### Server-Executed Tools - -**Special case**: Tools with `id.startsWith('srvtoolu_')` - -**Behavior**: - -- Already executed by LLM provider (e.g., Claude with computer use) -- GUI shows badge: ☁️ "Server tool" -- NOT sent to LSP for execution -- Display only (no confirmation needed) - -**Detection**: - -```typescript -export function isServerExecutedTool(toolCallId?: string): boolean { - return toolCallId?.startsWith("srvtoolu_") ?? false; -} -``` - -### Tool Result Insertion - -**Via IDE approval** (middleware listener): - -```typescript -listenerMiddleware.startListening({ - actionCreator: ideToolCallResponse, - effect: (action, listenerApi) => { - const { toolCallId, chatId, accepted } = action.payload; - - // 1. Update history - listenerApi.dispatch( - upsertToolCallIntoHistory({ - toolCallId, - chatId, - accepted, - }), - ); - - // 2. Insert/update tool result in messages - listenerApi.dispatch( - upsertToolCall({ - toolCallId, - chatId, - accepted, - }), - ); - - // 3. Remove pause reason - listenerApi.dispatch( - updateConfirmationAfterIdeToolUse({ - toolCallId, - }), - ); - - // 4. Continue chat if no more pauses - const state = listenerApi.getState(); - if (state.confirmation.pauseReasons.length === 0 && accepted) { - listenerApi.dispatch( - sendCurrentChatToLspAfterToolCallUpdate({ - chatId, - toolCallId, - }), - ); - } - }, -}); -``` - -**Via streaming** (LSP returns tool message): - -- Handled by `formatChatResponse` in reducer -- Tool message appended to `thread.messages` - -### Tool Loop Prevention - -**Problem**: AI might call same tool repeatedly (infinite loop) - -**Solution**: `checkForToolLoop(messages)` in actions - -```typescript -function checkForToolLoop(messages): boolean { - // Get recent assistant+tool messages - const recentMessages = takeFromEndWhile(messages, msg => - isToolMessage(msg) || isToolCallMessage(msg) - ) - - // Extract tool calls and results - const toolCalls = /* ... */ - const toolResults = /* ... 
*/ - - // Check for duplicates (same tool, args, AND result) - return scanForDuplicatesWith(toolCalls, (a, b) => { - const aResult = toolResults.find(msg => msg.content.tool_call_id === a.id) - const bResult = toolResults.find(msg => msg.content.tool_call_id === b.id) - - return ( - a.function.name === b.function.name && - a.function.arguments === b.function.arguments && - aResult?.content === bResult?.content - ) - }) -} -``` - -**If loop detected**: - -- Sets `only_deterministic_messages: true` in LSP request -- Stops streaming to prevent infinite loop - -### Subchat System - -**Feature**: Tools can spawn nested chats - -**Use case**: Multi-step research, recursive search - -**Flow**: - -``` -Tool call → LSP creates subchat → Subchat executes → Files attached to parent tool -``` - -**Message format**: - -```typescript -type SubchatResponse = { - subchat_id: string; - tool_call_id: string; - add_message: ContextFileResponse; -}; -``` - -**Rendering**: ToolsContent renders nested subchats recursively (max 5 deep) - -### Tool Status States - -```typescript -type ToolStatus = - | "thinking" // ⏳ Executing - | "success" // ✅ Completed - | "error" // ❌ Failed - | "server"; // ☁️ Server-executed (display only) -``` - -**Visual indicators** in ToolsContent component - -### Common Tool Types - -| Tool | Purpose | Execution | Confirmation? | -| --------------------------- | -------------- | --------- | ------------- | -| `patch` | Edit files | IDE | Optional | -| `text_edit` | Edit files | IDE | Optional | -| `shell` | Run commands | LSP | Optional | -| `read_file` | Read file | LSP | Rare | -| `search` | Code search | LSP | No | -| `web_search` | Search web | LSP | No | -| `knowledge` | Vec DB search | LSP | No | -| `textdoc` | Browse project | LSP | No | -| `remember_how_to_use_tools` | Save notes | LSP | No | - ---- - -## Development Workflows - -### How to Add a New Redux Slice - -**1. Create slice file**: - -```typescript -// src/features/MyFeature/myFeatureSlice.ts -import { createSlice } from "@reduxjs/toolkit"; - -export type MyFeatureState = { - data: string[]; - loading: boolean; -}; - -const initialState: MyFeatureState = { - data: [], - loading: false, -}; - -export const myFeatureSlice = createSlice({ - name: "myFeature", - initialState, - reducers: { - setData: (state, action: PayloadAction) => { - state.data = action.payload; - }, - setLoading: (state, action: PayloadAction) => { - state.loading = action.payload; - }, - }, - selectors: { - selectData: (state) => state.data, - selectLoading: (state) => state.loading, - }, -}); - -export const { setData, setLoading } = myFeatureSlice.actions; -export const { selectData, selectLoading } = myFeatureSlice.selectors; -``` - -**2. Register in store**: - -```typescript -// src/app/store.ts -import { myFeatureSlice } from "../features/MyFeature/myFeatureSlice"; - -const rootReducer = combineSlices( - chatSlice, - historySlice, - myFeatureSlice, // ← Add here - // ... -); -``` - -**3. Use in components**: - -```typescript -import { useAppSelector, useAppDispatch } from '@/hooks' -import { selectData, setData } from '@/features/MyFeature/myFeatureSlice' - -function MyComponent() { - const data = useAppSelector(selectData) - const dispatch = useAppDispatch() - - return ( - - ) -} -``` - -### How to Add a New API Endpoint - -**Using RTK Query**: - -**1. 
Create API file**: - -```typescript -// src/services/refact/myApi.ts -import { createApi } from "@reduxjs/toolkit/query/react"; -import { baseQueryWithAuth } from "./index"; - -export const myApi = createApi({ - reducerPath: "myApi", - baseQuery: baseQueryWithAuth, - endpoints: (builder) => ({ - getMyData: builder.query({ - query: ({ id }) => `/v1/my-endpoint/${id}`, - }), - updateMyData: builder.mutation({ - query: ({ id, data }) => ({ - url: `/v1/my-endpoint/${id}`, - method: "POST", - body: data, - }), - }), - }), -}); - -export const { useGetMyDataQuery, useUpdateMyDataMutation } = myApi; -``` - -**2. Register in store**: - -```typescript -// src/app/store.ts -import { myApi } from "../services/refact/myApi"; - -const rootReducer = combineSlices( - // ... other slices - myApi, // ← RTK Query auto-registers -); - -const store = configureStore({ - reducer: rootReducer, - middleware: (getDefaultMiddleware) => - getDefaultMiddleware() - .prepend(listenerMiddleware.middleware) - .concat(myApi.middleware), // ← Add middleware -}); -``` - -**3. Use in components**: - -```typescript -import { useGetMyDataQuery, useUpdateMyDataMutation } from '@/services/refact/myApi' - -function MyComponent() { - const { data, isLoading, error } = useGetMyDataQuery({ id: '123' }) - const [updateData] = useUpdateMyDataMutation() - - return ( -
- {isLoading && } - {error && {error.message}} - {data &&
{data.value}
} -
- ) -} -``` - -### How to Add a New Component - -**1. Create component directory**: - -``` -src/components/MyComponent/ -├── MyComponent.tsx -├── MyComponent.module.css -├── MyComponent.stories.tsx -├── MyComponent.test.tsx (optional) -└── index.ts -``` - -**2. Component file**: - -```typescript -// MyComponent.tsx -import React from 'react' -import { Flex, Text } from '@radix-ui/themes' -import styles from './MyComponent.module.css' - -export interface MyComponentProps { - title: string - onAction?: () => void -} - -export function MyComponent({ title, onAction }: MyComponentProps) { - return ( - - {title} - {onAction && ( - - )} - - ) -} -``` - -**3. CSS Module**: - -```css -/* MyComponent.module.css */ -.container { - padding: var(--space-3); - border-radius: var(--radius-2); - background: var(--color-surface); -} - -.button { - padding: var(--space-2) var(--space-3); - border: 1px solid var(--gray-6); - border-radius: var(--radius-2); - background: var(--accent-3); - color: var(--accent-11); - cursor: pointer; -} - -.button:hover { - background: var(--accent-4); -} -``` - -**4. Storybook story**: - -```typescript -// MyComponent.stories.tsx -import type { Meta, StoryObj } from "@storybook/react"; -import { MyComponent } from "./MyComponent"; - -const meta: Meta = { - title: "Components/MyComponent", - component: MyComponent, - tags: ["autodocs"], -}; - -export default meta; -type Story = StoryObj; - -export const Default: Story = { - args: { - title: "Example Title", - }, -}; - -export const WithAction: Story = { - args: { - title: "Clickable", - onAction: () => alert("Clicked!"), - }, -}; -``` - -**5. Index file**: - -```typescript -// index.ts -export { MyComponent } from "./MyComponent"; -export type { MyComponentProps } from "./MyComponent"; -``` - -### How to Add a New Hook - -**1. Create hook file**: - -```typescript -// src/hooks/useMyHook.ts -import { useState, useEffect } from "react"; -import { useAppSelector } from "./useAppSelector"; - -export function useMyHook(param: string) { - const [result, setResult] = useState(null); - const config = useAppSelector((state) => state.config); - - useEffect(() => { - // Hook logic here - const value = processParam(param, config); - setResult(value); - }, [param, config]); - - return result; -} -``` - -**2. Export from index**: - -```typescript -// src/hooks/index.ts -export * from "./useMyHook"; -``` - -**3. Use in components**: - -```typescript -import { useMyHook } from '@/hooks' - -function MyComponent() { - const result = useMyHook('input') - return
{result}
-} -``` - -### Project Conventions - -**File naming**: - -- Components: `PascalCase.tsx` -- Hooks: `useCamelCase.ts` -- Utilities: `camelCase.ts` -- Types: `PascalCase.ts` or `types.ts` -- CSS Modules: `PascalCase.module.css` - -**Import order**: - -1. React imports -2. Third-party imports -3. Internal imports (features, components, hooks) -4. Types -5. Styles - -**TypeScript**: - -- Always use types/interfaces (no `any`) -- Prefer `type` over `interface` (unless extending) -- Export types from same file as implementation - -**Testing**: - -- Test files next to implementation: `MyComponent.test.tsx` -- Use `describe` blocks for grouping -- Mock external dependencies with MSW - ---- - -## Testing - -### Testing Stack - -- **Framework**: Vitest 3.1 -- **React Testing**: React Testing Library 16.0 -- **Mocking**: MSW 2.3 (Mock Service Worker) -- **Environment**: happy-dom (lightweight DOM) -- **Coverage**: Vitest coverage-v8 - -### Test Setup - -**Global setup**: `src/utils/test-setup.ts` - -```typescript -import { beforeAll, afterEach, vi } from "vitest"; -import { cleanup } from "@testing-library/react"; - -beforeAll(() => { - // Stub browser APIs - stubResizeObserver(); - stubIntersectionObserver(); - Element.prototype.scrollIntoView = vi.fn(); - - // Mock localStorage - global.localStorage = { - getItem: vi.fn(() => null), - setItem: vi.fn(), - removeItem: vi.fn(), - clear: vi.fn(), - key: vi.fn(() => null), - length: 0, - }; -}); - -afterEach(() => { - cleanup(); // Clean up React components -}); - -// Mock lottie animations -vi.mock("lottie-react", () => ({ - default: vi.fn(), - useLottie: vi.fn(() => ({ - View: React.createElement("div"), - playSegments: vi.fn(), - })), -})); -``` - -### Custom Render Function - -**Location**: `src/utils/test-utils.tsx` - -```typescript -import { render as rtlRender } from '@testing-library/react' -import { Provider } from 'react-redux' -import { setUpStore } from '../app/store' - -function customRender( - ui: ReactElement, - { - preloadedState, - store = setUpStore(preloadedState), - ...renderOptions - }: ExtendedRenderOptions = {} -) { - const user = userEvent.setup() - - function Wrapper({ children }: PropsWithChildren) { - return ( - - - - - {children} - - - - - ) - } - - return { - ...rtlRender(ui, { wrapper: Wrapper, ...renderOptions }), - store, - user - } -} - -export { customRender as render } -export * from '@testing-library/react' -``` - -**Usage**: - -```typescript -import { render, screen, waitFor } from '@/utils/test-utils' - -test('renders chat', () => { - render(, { - preloadedState: { - chat: { thread: { messages: [] } } - } - }) - expect(screen.getByText('Chat')).toBeInTheDocument() -}) -``` - -### MSW Setup - -**Worker**: `public/mockServiceWorker.js` (generated by MSW) - -**Handlers**: `src/__fixtures__/msw.ts` - -```typescript -import { setupServer } from "msw/node"; -import { http, HttpResponse } from "msw"; - -export const handlers = [ - http.get("http://127.0.0.1:8001/v1/caps", () => { - return HttpResponse.json({ - chat_default_model: "gpt-4", - chat_models: { - "gpt-4": { n_ctx: 8192 }, - }, - }); - }), - - http.post("http://127.0.0.1:8001/v1/chat", async ({ request }) => { - const body = await request.json(); - // Return streaming response - const stream = new ReadableStream({ - start(controller) { - controller.enqueue( - new TextEncoder().encode('data: {"choices":[...]}\n\n'), - ); - controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n")); - controller.close(); - }, - }); - return new HttpResponse(stream, { - 
headers: { "Content-Type": "text/event-stream" }, - }); - }), -]; - -export const server = setupServer(...handlers); - -// Start server before tests -beforeAll(() => server.listen()); -afterEach(() => server.resetHandlers()); -afterAll(() => server.close()); -``` - -### Fixtures - -**Location**: `src/__fixtures__/` - -**20+ fixture files** for test data: - -```typescript -// caps.ts -export const STUB_CAPS_RESPONSE = { - chat_default_model: "gpt-4", - chat_models: { - /* ... */ - }, -}; - -// chat.ts -export const STUB_CHAT_MESSAGES = [ - { role: "user", content: "Hello" }, - { role: "assistant", content: "Hi there!" }, -]; - -// tools_response.ts -export const STUB_TOOL_CALL = { - id: "call_123", - function: { name: "shell", arguments: '{"cmd":"ls"}' }, -}; -``` - -### Example Tests - -**Component test**: - -```typescript -// ChatForm.test.tsx -import { render, screen, waitFor } from '@/utils/test-utils' -import { ChatForm } from './ChatForm' - -describe('ChatForm', () => { - test('sends message on submit', async () => { - const { user } = render() - - const input = screen.getByRole('textbox') - await user.type(input, 'Hello AI') - - const button = screen.getByRole('button', { name: /send/i }) - await user.click(button) - - await waitFor(() => { - expect(screen.getByText('Sending...')).toBeInTheDocument() - }) - }) - - test('disables send when empty', () => { - render() - const button = screen.getByRole('button', { name: /send/i }) - expect(button).toBeDisabled() - }) -}) -``` - -**Hook test**: - -```typescript -// useSendChatRequest.test.ts -import { renderHook, waitFor } from "@testing-library/react"; -import { useSendChatRequest } from "./useSendChatRequest"; - -test("submit sends message", async () => { - const { result } = renderHook(() => useSendChatRequest()); - - act(() => { - result.current.submit({ question: "Test" }); - }); - - await waitFor(() => { - expect(result.current.isWaiting).toBe(true); - }); -}); -``` - -### Running Tests - -```bash -# Watch mode (default) -npm test - -# Run once (CI) -npm run test:no-watch - -# Coverage report -npm run coverage - -# UI mode (visual test runner) -npm run test:ui -``` - -### Storybook as Dev Tool - -**Storybook** serves as visual component documentation: - -```bash -npm run storybook # Start on :6006 -``` - -**30+ stories** across components, showcasing: - -- Different states (loading, error, success) -- Edge cases (empty, long text, special chars) -- Interactive controls (change props live) - -**Stories use MSW** for API mocking: - -```typescript -// ChatContent.stories.tsx -export const Streaming: Story = { - parameters: { - msw: { - handlers: [ - http.post('/v1/chat', () => /* streaming response */) - ] - } - } -} -``` - ---- - -## Debugging - -### Debug Mode - -**Enable logging**: - -```bash -DEBUG=refact,app,integrations npm run dev -``` - -**Debug namespaces**: - -- `refact` - Core chat logic -- `app` - Application lifecycle -- `integrations` - Integration system -- `*` - Everything - -**Location**: `src/debugConfig.ts` - -```typescript -import debug from "debug"; - -export const debugRefact = debug("refact"); -export const debugApp = debug("app"); -export const debugIntegrations = debug("integrations"); - -// Usage in code: -debugRefact("Sending message: %O", message); -``` - -### Redux DevTools - -**Auto-enabled in development**: - -```typescript -const store = configureStore({ - reducer: rootReducer, - middleware: /* ... 
*/, - devTools: process.env.NODE_ENV !== 'production' // ← Auto-enabled -}) -``` - -**Features**: - -- Time-travel debugging -- Action replay -- State diff viewer -- Performance monitoring - -**Max actions**: 50 (configured in store) - -### Console Logging Patterns - -**Guarded logs** (most of codebase): - -```typescript -if (process.env.NODE_ENV === "development") { - console.log("Debug info:", data); -} -``` - -**Production logs** (errors only): - -```typescript -console.error("Critical error:", error); -``` - -**~5% of code has console.log** - minimal logging philosophy - -### Telemetry - -**Location**: `src/services/refact/telemetry.ts` - -**What's tracked**: - -```typescript -telemetryApi.useSendTelemetryChatEventMutation() - -// Events tracked: -{ - scope: 'replaceSelection' | 'ideOpenFile/customization.yaml' | 'copyToClipboard', - success: boolean, - error_message: string -} -``` - -**Telemetry is opt-in** (configured in LSP server) - -### Common Issues & Solutions - -#### Issue: Messages not sending - -**Triage**: - -```typescript -// Check these selectors in Redux DevTools: -state.chat.prevent_send; // Should be false -state.chat.waiting_for_response; // Should be false when idle -state.chat.streaming; // Should be false when idle -state.confirmation.pauseReasons; // Should be empty [] -``` - -**Fix**: - -- If `prevent_send: true` → Click "Retry" or start new chat -- If paused → Check ToolConfirmation popup, confirm or reject -- If streaming stuck → Reload app - -#### Issue: Tool confirmation stuck - -**Triage**: - -```typescript -state.confirmation.pauseReasons; // What's blocking? -state.confirmation.wasInteracted; // Did user interact? -``` - -**Fix**: - -- Check if IDE sent `ideToolCallResponse` -- Check middleware listener is running -- Confirm/reject manually in UI - -#### Issue: Streaming stopped mid-response - -**Triage**: - -- Check browser console for errors -- Check Network tab for aborted requests -- Check if `doneStreaming` was called prematurely - -**Fix**: - -- LSP server issue (restart LSP) -- Network interruption (retry) -- Check abort controller logic - -#### Issue: Dark mode not working - -**Triage**: - -```typescript -state.config.themeProps.appearance; // What's set? -document.body.className; // Should be 'vscode-dark' or 'vscode-light' -``` - -**Fix**: - -- Check middleware listener for appearance changes -- Verify Radix Theme is wrapping app -- Check if host is controlling theme - -#### Issue: postMessage not working - -**Triage**: - -```typescript -state.config.host; // Should match actual host -window.acquireVsCodeApi; // Exists in VSCode? -window.postIntellijMessage; // Exists in JetBrains? 
-``` - -**Fix**: - -- Verify host type is correct -- Check IDE extension is sending messages -- Check event listeners are attached - -### Performance Debugging - -**React DevTools Profiler**: - -- Record chat interaction -- Look for long renders (>16ms) -- Check component re-render count - -**Common bottlenecks**: - -- Large message arrays (use selectors, not direct state) -- Markdown rendering (memoize with React.memo) -- Recursive renderMessages (optimize with useCallback) - -### Network Debugging - -**Check requests in Network tab**: - -| Endpoint | Expected Response | Check | -| ----------- | ----------------- | --------------------------- | -| `/v1/caps` | JSON | 200 OK | -| `/v1/chat` | SSE stream | 200 OK, `text/event-stream` | -| `/v1/tools` | JSON | 200 OK | - -**Common issues**: - -- CORS errors → LSP server not running -- 401 Unauthorized → Check `state.config.apiKey` -- Connection refused → Wrong LSP port - -### Debug Checklist - -When investigating issues: - -- [ ] Check Redux state in DevTools -- [ ] Check browser console for errors -- [ ] Check Network tab for failed requests -- [ ] Enable DEBUG logging -- [ ] Check LSP server is running (`:8001/v1/ping`) -- [ ] Verify host type matches environment -- [ ] Check middleware listeners are registered -- [ ] Review recent actions in Redux timeline -- [ ] Check for pause reasons blocking flow -- [ ] Verify messages array structure - ---- - -## Special Features - -### Checkpoints System - -**Purpose**: Rollback workspace to previous state (undo AI code changes) - -**Location**: `src/features/Checkpoints/` - -**How it works**: - -``` -User message → AI makes changes → Checkpoint created - ↓ - {workspace_folder, commit_hash} - ↓ - Attached to user message - ↓ - User clicks 🔄 Reset button - ↓ - Preview changes (API call) - ↓ - Apply rollback (API call) - ↓ - Files reverted + chat truncated -``` - -**API Endpoints**: - -```typescript -// Preview what will change -POST /v1/preview_checkpoints -{ - "checkpoints": [ - { "workspace_folder": "/path", "commit_hash": "abc123" } - ] -} -// Returns: { files: [{file_name, status: 'A'|'M'|'D'}], error_log: string } - -// Apply rollback -POST /v1/restore_checkpoints -{ - "checkpoints": [/* same */] -} -// Returns: { success: boolean, error_log?: string } -``` - -**UI Components**: - -- `CheckpointButton` - Per-message reset button -- `Checkpoints` modal - Shows file changes before apply -- `CheckpointsStatusIndicator` - Visual feedback - -**State**: - -```typescript -state.checkpoints = { - previewData: { files: [...], error_log: '' } | null, - restoreInProgress: boolean -} -``` - -**After restore**: - -- Chat history truncates to checkpoint message -- OR starts new chat with context -- IDE reloads affected files (JetBrains auto-refresh) - -### Docker Integration - -**Purpose**: Manage Docker containers from chat UI - -**Location**: `src/components/IntegrationsView/IntegrationDocker/` - -**Features**: - -- List containers by image/label -- Start/Stop/Kill/Remove actions -- View environment variables -- SmartLinks for AI context - -**API**: - -```typescript -// List containers -POST /v1/docker-container-list -{ "docker_image_name": "postgres", "docker_container_labels": ["app=myapp"] } -// Returns: { containers: [{ id, name, status, ports, env, ... 
}] } - -// Execute action -POST /v1/docker-container-action -{ "container_id": "abc123", "action": "start" } -// Returns: { success: boolean, message: string } -``` - -**UI**: - -- `DockerContainerCard` - Shows container details -- Actions dropdown: Start, Stop, Kill, Remove -- Env vars collapsible -- SmartLinks feed container info to AI - -**Use case**: AI can reference containers in responses, user manages from UI - -### Compression Hints - -**Purpose**: Alert user when context is too large - -**Indicator**: 🗜️ icon on user messages - -**Detection**: LSP returns `compression_strength` in response: - -```typescript -type CompressionStrength = "absent" | "weak" | "strong"; -``` - -**When shown**: - -- `weak` - Context approaching limit -- `strong` - Context exceeds recommended size - -**Action**: - -- Show "Start New Chat" suggestion -- User can reject or accept suggestion - -**State**: - -```typescript -thread.new_chat_suggested = { - wasSuggested: boolean, - wasRejectedByUser?: boolean -} -``` - -### Memory System (Context Files) - -**Feature**: AI can remember information across chats - -**Indicator**: 🗃️ icon on messages - -**How it works**: - -1. AI calls `remember_how_to_use_tools()` -2. Notes saved to vector DB -3. Relevant notes attached to future messages -4. Shows as `context_file` messages - -**Message type**: - -```typescript -type ContextFileMessage = { - role: "context_file"; - content: ChatContextFile[]; -}; - -type ChatContextFile = { - file_name: string; - file_content: string; - line1: number; - line2: number; -}; -``` - -**Rendering**: ContextFiles component shows attached files - -### Queued Messages - -**Purpose**: Send multiple messages while AI is responding - -**How it works**: - -- User sends message while streaming → Message queued -- Queue has priority levels: - - `priority: true` - Send immediately after current stream - - `priority: false` - Send after tools complete - -**State**: - -```typescript -type QueuedUserMessage = { - id: string - message: UserMessage - createdAt: number - priority?: boolean -} - -state.chat.queued_messages: QueuedUserMessage[] -``` - -**Auto-flush** handled by `useAutoSend()` hook - -**Visual**: QueuedMessage component shows pending messages - -### Multi-Modal Support - -**Images in user messages**: - -```typescript -{ - role: 'user', - content: [ - { type: 'text', text: 'What's in this image?' }, - { type: 'image_url', image_url: { url: 'data:image/png;base64,...' } } - ] -} -``` - -**Images in tool results**: - -```typescript -{ - role: 'tool', - content: [ - { type: 'image_url', image_url: { url: 'http://...' 
} } - ] -} -``` - -**UI**: `DialogImage` component for lightbox view - -### Smart Links - -**Purpose**: Context-aware actions in chat - -**Format**: Special markdown links - -```markdown -[🔗 Open file.py:42](smartlink://open?file=file.py&line=42) -``` - -**Rendered by**: `SmartLink` component - -**Actions**: - -- Open file at line -- Run command -- Navigate to integration -- Apply configuration - -### Usage Tracking - -**Shows in UI**: Token counts, cost estimates - -**Data sources**: - -```typescript -message.usage = { - prompt_tokens: number, - completion_tokens: number, - total_tokens: number, - cache_read_input_tokens?: number, - cache_creation_input_tokens?: number -} - -// Metering (coins for SmallCloud) -message.metering_balance?: number -message.metering_*_tokens_n?: number -message.metering_coins_*?: number -``` - -**Component**: `UsageCounter` - Shows breakdown of token usage - -### Reasoning Content - -**Feature**: Separate field for model's reasoning (Claude, o1, etc.) - -**Format**: - -```typescript -{ - role: 'assistant', - content: 'Here's my answer', // Main response - reasoning_content: 'First I thought...' // Reasoning (hidden by default) -} -``` - -**UI**: `ReasoningContent` component - Collapsible section - -### Thinking Blocks - -**Feature**: Structured reasoning blocks (different from reasoning_content) - -```typescript -type ThinkingBlock = { - thinking: string; // Reasoning text - signature?: string; // Model signature/metadata -}; - -message.thinking_blocks = [{ thinking: "...", signature: "..." }]; -``` - -**Rendered in**: AssistantInput (collapsible) - ---- - -## Quick Reference - -### File Structure Cheat Sheet - -``` -src/ -├── app/ # Redux store, middleware, storage -├── components/ # Reusable UI (40+ components) -├── features/ # Redux slices + feature UIs (25+ features) -├── hooks/ # Custom hooks (60+) -├── services/ # API definitions (refact + smallcloud) -├── events/ # IDE integration types -├── lib/ # Library entry + render function -├── utils/ # Utility functions -├── __fixtures__/ # Test data (20+ files) -└── debugConfig.ts # Debug namespaces -``` - -### Key Commands - -```bash -# Development -npm ci # Install deps -npm run dev # Dev server -npm run build # Build library -npm test # Run tests -npm run storybook # Component explorer -npm run lint # Lint code -npm run types # Type check -DEBUG=* npm run dev # Debug mode - -# Publishing -npm run alpha:version # Bump alpha version -npm run alpha:publish # Publish to npm -``` - -### Important Patterns - -**Redux**: - -- Use selectors (don't access state directly) -- Use RTK Query for APIs -- Use listeners for cross-cutting concerns - -**Components**: - -- Use Radix primitives + CSS Modules -- Use design tokens (no magic numbers) -- Memoize expensive renders - -**Hooks**: - -- Export from `hooks/index.ts` -- Use `useAppSelector`/`useAppDispatch` wrappers -- Follow `use` prefix convention - -**Types**: - -- Use type guards for message routing -- Export types with implementation -- Strict TypeScript mode (no `any`) - -### Critical State Invariants - -```typescript -// Chat can send if ALL true: -!state.chat.prevent_send -!state.chat.waiting_for_response -!state.chat.streaming -!selectHasUncalledTools(state) -state.confirmation.pauseReasons.length === 0 - -// Tool confirmation needed if: -lastMessage.tool_calls exists -!wasInteracted -!(isPatchLike && automatic_patch) - -// Queue flushes when: -// Priority: base conditions (no streaming, no waiting) -// Regular: base + no tools + no pause reasons -``` - -### 
Common Gotchas - -1. **Don't mutate state** - Redux Toolkit allows in reducers, but not elsewhere -2. **Don't skip selectors** - Always use memoized selectors -3. **Don't bypass type guards** - Use `isAssistantMessage()` etc. -4. **Don't hardcode colors/spacing** - Use Radix tokens -5. **Don't forget to register** - New slices/APIs must be registered in store -6. **Don't block the UI** - Use abort controllers for cancellable requests -7. **Don't trust streaming order** - Handle out-of-order chunks -8. **Don't forget pause reasons** - Tool confirmation can block everything - -### Debugging Quick Wins - -```typescript -// Check state in console: -window.__REDUX_DEVTOOLS_EXTENSION__; - -// Force re-render: -dispatch(newChatAction()); - -// Clear pause: -dispatch( - clearPauseReasonsAndHandleToolsStatus({ - wasInteracted: false, - confirmationStatus: true, - }), -); - -// Reset prevent_send: -dispatch(enableSend({ id: chatId })); - -// Check LSP health: -fetch("http://127.0.0.1:8001/v1/ping").then((r) => r.json()); -``` - ---- - -## For AI Coding Agents - -### When Modifying Message Flow - -**MUST CHECK**: - -1. State transitions (`waiting_for_response`, `streaming`, `prevent_send`) -2. Tool confirmation logic (don't break pause system) -3. Queue flush conditions (priority vs regular) -4. Abort handling (cleanup state properly) -5. Message formatting (use `formatChatResponse`) -6. Type guards (don't assume message structure) - -### When Adding Message Types - -**MUST DO**: - -1. Add type definition in `services/refact/types.ts` -2. Add type guard (`isMyMessage`) -3. Update `formatChatResponse` to handle it -4. Update `renderMessages` to render it -5. Create component for rendering -6. Update `formatMessagesForLsp` if needed for sending - -### When Touching Redux - -**MUST DO**: - -1. Use selectors (create if missing) -2. Use immutable updates (even though Immer allows mutations) -3. Add to `combineSlices` if new slice -4. Add middleware if new RTK Query API -5. Test state transitions - -### When Modifying UI - -**MUST DO**: - -1. Use Radix primitives where possible -2. Use CSS Modules (not inline styles) -3. Use design tokens (not literals) -4. Test dark mode -5. Check responsive (at least 768px) -6. 
Add Storybook story - -### Red Flags - -🚨 **STOP if you see**: - -- Direct state mutation outside reducers -- Hardcoded colors (#hex) or spacing (px) -- `any` types (use proper typing) -- Synchronous network calls (use async) -- Missing type guards for message routing -- Global CSS without `:global()` wrapper -- Missing cleanup in `useEffect` returns - ---- - -## Version History - -**Current**: v2.0.10-alpha.3 - -**Recent changes** (inferred from codebase): - -- Queued messages with priority system -- Compression hints and new chat suggestions -- Reasoning content support -- Tool confirmation improvements -- Docker integration enhancements -- Checkpoints UI polish - ---- - -## Contributing - -### Before Submitting PR - -- [ ] Run `npm run lint` (no errors) -- [ ] Run `npm run types` (type check passes) -- [ ] Run `npm test` (all tests pass) -- [ ] Add tests for new features -- [ ] Add Storybook story for new components -- [ ] Update AGENTS.md if architecture changes -- [ ] Follow existing code style -- [ ] No console.log in production code - -### Commit Messages - -Follow conventional commits: - -``` -feat: add queued messages -fix: prevent double-send on tool confirmation -refactor: extract streaming logic -docs: update AGENTS.md -test: add tool loop prevention test -``` - ---- - -## Getting Help - -**Resources**: +## Testing -- README.md - Library API reference -- Storybook - Component documentation (`:6006`) -- Redux DevTools - State inspection -- GitHub Issues - Bug reports +Vitest + React Testing Library + MSW + happy-dom. Custom render in `utils/test-utils.tsx` wraps Provider/Theme/Tour/AbortController. Fixtures in `__fixtures__/`. MSW handlers mock LSP endpoints. -**Community**: +## Agent Checklist -- GitHub: https://github.com/smallcloudai/refact -- Discord: (check README) +**When modifying chat flow**: Check state transitions, SSE event handling in reducer, command sending via `chatCommands.ts`, sequence validation, tool confirmation logic, type guards. ---- +**When adding SSE events**: Type in `chatSubscription.ts` → handler in reducer's `applyChatEvent` → update `EventEnvelope` union → add tests. -**Last Updated**: December 2024 -**Document Version**: 1.0 -**Maintained by**: SmallCloudAI Team +**When touching Redux**: Use selectors. Register new slices/APIs in store. Add middleware for new APIs. Test state transitions. ---- +**When modifying UI**: Radix primitives. CSS Modules. Design tokens. Test dark mode. -_This document is a living guide. If you find errors or omissions, please update it._ +**Red flags**: Direct `state.chat.thread` (old pattern, use `threads[id]`), hardcoded colors/spacing, `any` types, missing sequence validation, missing `snapshot_received` checks, missing `useEffect` cleanup. 
diff --git a/refact-agent/gui/package-lock.json b/refact-agent/gui/package-lock.json index 251cac3ab..93f13e33f 100644 --- a/refact-agent/gui/package-lock.json +++ b/refact-agent/gui/package-lock.json @@ -1,12 +1,12 @@ { "name": "refact-chat-js", - "version": "2.0.10-alpha.3", + "version": "7.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "refact-chat-js", - "version": "2.0.10-alpha.3", + "version": "7.0.0", "hasInstallScript": true, "license": "BSD-3-Clause", "dependencies": { @@ -19,6 +19,7 @@ "graphql": "^16.11.0", "react-arborist": "^3.4.3", "react-redux": "^9.1.2", + "react-virtuoso": "^4.18.1", "urql": "^4.2.2", "zod": "^3.25.20" }, @@ -34,6 +35,7 @@ "@radix-ui/react-icons": "^1.3.0", "@radix-ui/react-toolbar": "^1.0.4", "@radix-ui/themes": "^3.0.1", + "@shikijs/transformers": "^3.22.0", "@storybook/addon-essentials": "^7.6.4", "@storybook/addon-interactions": "^7.6.4", "@storybook/addon-links": "^7.6.4", @@ -45,6 +47,7 @@ "@testing-library/dom": "^10.1.0", "@testing-library/react": "^16.0.0", "@testing-library/user-event": "^14.5.1", + "@types/cytoscape": "^3.31.0", "@types/debug": "^4.1.12", "@types/diff": "^7.0.1", "@types/js-cookie": "^3.0.6", @@ -52,7 +55,6 @@ "@types/lodash.isequal": "^4.5.8", "@types/react-dom": "^18.2.17", "@types/react-redux": "^7.1.33", - "@types/react-syntax-highlighter": "^15.5.11", "@types/textarea-caret": "^3.0.3", "@types/wicg-file-system-access": "^2023.10.4", "@typescript-eslint/eslint-plugin": "^6.14.0", @@ -61,6 +63,8 @@ "@vitest/coverage-v8": "^3.1.3", "@vitest/ui": "^3.1.3", "classnames": "^2.3.2", + "cytoscape": "^3.33.1", + "cytoscape-fcose": "^2.2.0", "diff": "^7.0.0", "echarts": "^5.4.3", "echarts-for-react": "^3.0.2", @@ -83,15 +87,16 @@ "patch-package": "^8.0.0", "prettier": "3.1.1", "react": "^18.2.0", + "react-cytoscapejs": "^2.0.0", "react-dom": "^18.2.0", "react-dropzone": "^14.2.10", "react-markdown": "^9.0.1", - "react-syntax-highlighter": "^15.5.0", "redux-persist": "^6.0.0", "rehype-katex": "^7.0.0", "remark-breaks": "^4.0.0", "remark-gfm": "^4.0.0", "remark-math": "^6.0.0", + "shiki": "^3.22.0", "storybook": "^7.6.4", "textarea-caret": "^3.1.0", "typescript": "^5.8.3", @@ -8072,6 +8077,83 @@ "string-argv": "~0.3.1" } }, + "node_modules/@shikijs/core": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-3.22.0.tgz", + "integrity": "sha512-iAlTtSDDbJiRpvgL5ugKEATDtHdUVkqgHDm/gbD2ZS9c88mx7G1zSYjjOxp5Qa0eaW0MAQosFRmJSk354PRoQA==", + "dev": true, + "dependencies": { + "@shikijs/types": "3.22.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4", + "hast-util-to-html": "^9.0.5" + } + }, + "node_modules/@shikijs/engine-javascript": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-3.22.0.tgz", + "integrity": "sha512-jdKhfgW9CRtj3Tor0L7+yPwdG3CgP7W+ZEqSsojrMzCjD1e0IxIbwUMDDpYlVBlC08TACg4puwFGkZfLS+56Tw==", + "dev": true, + "dependencies": { + "@shikijs/types": "3.22.0", + "@shikijs/vscode-textmate": "^10.0.2", + "oniguruma-to-es": "^4.3.4" + } + }, + "node_modules/@shikijs/engine-oniguruma": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.22.0.tgz", + "integrity": "sha512-DyXsOG0vGtNtl7ygvabHd7Mt5EY8gCNqR9Y7Lpbbd/PbJvgWrqaKzH1JW6H6qFkuUa8aCxoiYVv8/YfFljiQxA==", + "dev": true, + "dependencies": { + "@shikijs/types": "3.22.0", + "@shikijs/vscode-textmate": "^10.0.2" + } + }, + "node_modules/@shikijs/langs": { + "version": "3.22.0", + 
"resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.22.0.tgz", + "integrity": "sha512-x/42TfhWmp6H00T6uwVrdTJGKgNdFbrEdhaDwSR5fd5zhQ1Q46bHq9EO61SCEWJR0HY7z2HNDMaBZp8JRmKiIA==", + "dev": true, + "dependencies": { + "@shikijs/types": "3.22.0" + } + }, + "node_modules/@shikijs/themes": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-3.22.0.tgz", + "integrity": "sha512-o+tlOKqsr6FE4+mYJG08tfCFDS+3CG20HbldXeVoyP+cYSUxDhrFf3GPjE60U55iOkkjbpY2uC3It/eeja35/g==", + "dev": true, + "dependencies": { + "@shikijs/types": "3.22.0" + } + }, + "node_modules/@shikijs/transformers": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/@shikijs/transformers/-/transformers-3.22.0.tgz", + "integrity": "sha512-E7eRV7mwDBjueLF6852n2oYeJYxBq3NSsDk+uyruYAXONv4U8holGmIrT+mPRJQ1J1SNOH6L8G19KRzmBawrFw==", + "dev": true, + "dependencies": { + "@shikijs/core": "3.22.0", + "@shikijs/types": "3.22.0" + } + }, + "node_modules/@shikijs/types": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.22.0.tgz", + "integrity": "sha512-491iAekgKDBFE67z70Ok5a8KBMsQ2IJwOWw3us/7ffQkIBCyOQfm/aNwVMBUriP02QshIfgHCBSIYAl3u2eWjg==", + "dev": true, + "dependencies": { + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/@shikijs/vscode-textmate": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-10.0.2.tgz", + "integrity": "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==", + "dev": true + }, "node_modules/@sinclair/typebox": { "version": "0.27.8", "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", @@ -10898,6 +10980,16 @@ "@types/node": "*" } }, + "node_modules/@types/cytoscape": { + "version": "3.31.0", + "resolved": "https://registry.npmjs.org/@types/cytoscape/-/cytoscape-3.31.0.tgz", + "integrity": "sha512-EXHOHxqQjGxLDEh5cP4te6J0bi7LbCzmZkzsR6f703igUac8UGMdEohMyU3GHAayCTZrLQOMnaE/lqB2Ekh8Ww==", + "deprecated": "This is a stub types definition. 
cytoscape provides its own type definitions, so you do not need this installed.", + "dev": true, + "dependencies": { + "cytoscape": "*" + } + }, "node_modules/@types/debug": { "version": "4.1.12", "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", @@ -11019,9 +11111,9 @@ } }, "node_modules/@types/hast": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.3.tgz", - "integrity": "sha512-2fYGlaDy/qyLlhidX42wAH0KBi2TCjKMH8CHmBXgRlJ3Y+OXTiqsPQ6IWarZKwF1JoUcAJdPogv1d4b0COTpmQ==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", "dev": true, "dependencies": { "@types/unist": "*" @@ -11093,9 +11185,9 @@ "dev": true }, "node_modules/@types/lodash": { - "version": "4.14.202", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.202.tgz", - "integrity": "sha512-OvlIYQK9tNneDlS0VN54LLd5uiPCBOp7gS5Z0f1mjoJYBrtStzgmJBxONW3U6OZqdtNzZPmn9BS/7WI7BFFcFQ==", + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-FOvQ0YPD5NOfPgMzJihoT+Za5pdkDJWcbpuj1DjaKZIr/gxodQjY/uWEFlTNqW2ugXHUiL8lRQgw63dzKHZdeQ==", "dev": true }, "node_modules/@types/lodash.groupby": { @@ -11261,15 +11353,6 @@ "@babel/runtime": "^7.9.2" } }, - "node_modules/@types/react-syntax-highlighter": { - "version": "15.5.11", - "resolved": "https://registry.npmjs.org/@types/react-syntax-highlighter/-/react-syntax-highlighter-15.5.11.tgz", - "integrity": "sha512-ZqIJl+Pg8kD+47kxUjvrlElrraSUrYa4h0dauY/U/FTUuprSCqvUj+9PNQNQzVc6AJgIWUUxn87/gqsMHNbRjw==", - "dev": true, - "dependencies": { - "@types/react": "*" - } - }, "node_modules/@types/resolve": { "version": "1.20.6", "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.6.tgz", @@ -13912,6 +13995,15 @@ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", "dev": true }, + "node_modules/cose-base": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cose-base/-/cose-base-2.2.0.tgz", + "integrity": "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==", + "dev": true, + "dependencies": { + "layout-base": "^2.0.0" + } + }, "node_modules/cosmiconfig": { "version": "8.3.6", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", @@ -14043,6 +14135,27 @@ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" }, + "node_modules/cytoscape": { + "version": "3.33.1", + "resolved": "https://registry.npmjs.org/cytoscape/-/cytoscape-3.33.1.tgz", + "integrity": "sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==", + "dev": true, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/cytoscape-fcose": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz", + "integrity": "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==", + "dev": true, + "dependencies": { + "cose-base": "^2.2.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, "node_modules/data-uri-to-buffer": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", 
@@ -15783,19 +15896,6 @@ "reusify": "^1.0.4" } }, - "node_modules/fault": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", - "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", - "dev": true, - "dependencies": { - "format": "^0.2.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, "node_modules/fb-watchman": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", @@ -16209,15 +16309,6 @@ "node": ">= 6" } }, - "node_modules/format": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", - "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", - "dev": true, - "engines": { - "node": ">=0.4.x" - } - }, "node_modules/formdata-polyfill": { "version": "4.0.10", "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", @@ -16332,6 +16423,20 @@ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, "node_modules/function-bind": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", @@ -17158,16 +17263,55 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/hast-util-parse-selector": { - "version": "2.2.5", - "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", - "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==", + "node_modules/hast-util-to-html": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.5.tgz", + "integrity": "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==", "dev": true, + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-whitespace": "^3.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4" + }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, + "node_modules/hast-util-to-html/node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==", + "dev": true + }, + "node_modules/hast-util-to-html/node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "dev": true, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + 
"node_modules/hast-util-to-html/node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "dev": true, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/hast-util-to-jsx-runtime": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.0.tgz", @@ -17252,55 +17396,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/hastscript": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", - "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", - "dev": true, - "dependencies": { - "@types/hast": "^2.0.0", - "comma-separated-tokens": "^1.0.0", - "hast-util-parse-selector": "^2.0.0", - "property-information": "^5.0.0", - "space-separated-tokens": "^1.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" - } - }, - "node_modules/hastscript/node_modules/@types/hast": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.8.tgz", - "integrity": "sha512-aMIqAlFd2wTIDZuvLbhUT+TGvMxrNC8ECUIVtH6xxy0sQLs3iu6NO8Kp/VT5je7i5ufnebXzdV1dNDMnvaH6IQ==", - "dev": true, - "dependencies": { - "@types/unist": "^2" - } - }, - "node_modules/hastscript/node_modules/comma-separated-tokens": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", - "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", - "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/hastscript/node_modules/property-information": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", - "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", - "dev": true, - "dependencies": { - "xtend": "^4.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, "node_modules/he": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", @@ -17327,15 +17422,6 @@ "integrity": "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==", "dev": true }, - "node_modules/highlight.js": { - "version": "10.7.3", - "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", - "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", - "dev": true, - "engines": { - "node": "*" - } - }, "node_modules/hoist-non-react-statics": { "version": "3.3.2", "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", @@ -17397,6 +17483,16 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "dev": true, + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/wooorm" + } + }, "node_modules/http-errors": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", @@ -19109,6 +19205,12 @@ "integrity": "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==", "dev": true }, + "node_modules/layout-base": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/layout-base/-/layout-base-2.0.1.tgz", + "integrity": "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==", + "dev": true + }, "node_modules/lazy-universal-dotenv": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/lazy-universal-dotenv/-/lazy-universal-dotenv-4.0.0.tgz", @@ -19798,20 +19900,6 @@ "tslib": "^2.0.3" } }, - "node_modules/lowlight": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-1.20.0.tgz", - "integrity": "sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw==", - "dev": true, - "dependencies": { - "fault": "^1.0.0", - "highlight.js": "~10.7.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, "node_modules/lru-cache": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", @@ -21985,6 +22073,23 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/oniguruma-parser": { + "version": "0.12.1", + "resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.1.tgz", + "integrity": "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==", + "dev": true + }, + "node_modules/oniguruma-to-es": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.4.tgz", + "integrity": "sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==", + "dev": true, + "dependencies": { + "oniguruma-parser": "^0.12.1", + "regex": "^6.0.1", + "regex-recursion": "^6.0.2" + } + }, "node_modules/open": { "version": "8.4.2", "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", @@ -22860,15 +22965,6 @@ "node": ">= 0.8" } }, - "node_modules/prismjs": { - "version": "1.29.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", - "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", - "dev": true, - "engines": { - "node": ">=6" - } - }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -23270,6 +23366,19 @@ "react": "^16.3.0 || ^17.0.1 || ^18.0.0" } }, + "node_modules/react-cytoscapejs": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/react-cytoscapejs/-/react-cytoscapejs-2.0.0.tgz", + "integrity": "sha512-t3SSl1DQy7+JQjN+8QHi1anEJlM3i3aAeydHTsJwmjo/isyKK7Rs7oCvU6kZsB9NwZidzZQR21Vm2PcBLG/Tjg==", + "dev": true, + "dependencies": { + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "cytoscape": "^3.2.19", + "react": ">=15.0.0" + } + }, "node_modules/react-dnd": { "version": "14.0.5", "resolved": "https://registry.npmjs.org/react-dnd/-/react-dnd-14.0.5.tgz", @@ -23584,20 +23693,13 @@ } } }, - "node_modules/react-syntax-highlighter": { - "version": "15.5.0", - "resolved": "https://registry.npmjs.org/react-syntax-highlighter/-/react-syntax-highlighter-15.5.0.tgz", - "integrity": 
"sha512-+zq2myprEnQmH5yw6Gqc8lD55QHnpKaU8TOcFeC/Lg/MQSs8UknEA0JC4nTZGFAXC2J2Hyj/ijJ7NlabyPi2gg==", - "dev": true, - "dependencies": { - "@babel/runtime": "^7.3.1", - "highlight.js": "^10.4.1", - "lowlight": "^1.17.0", - "prismjs": "^1.27.0", - "refractor": "^3.6.0" - }, + "node_modules/react-virtuoso": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/react-virtuoso/-/react-virtuoso-4.18.1.tgz", + "integrity": "sha512-KF474cDwaSb9+SJ380xruBB4P+yGWcVkcu26HtMqYNMTYlYbrNy8vqMkE+GpAApPPufJqgOLMoWMFG/3pJMXUA==", "peerDependencies": { - "react": ">= 0.14.0" + "react": ">=16 || >=17 || >= 18 || >= 19", + "react-dom": ">=16 || >=17 || >= 18 || >=19" } }, "node_modules/react-window": { @@ -23828,122 +23930,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/refractor": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/refractor/-/refractor-3.6.0.tgz", - "integrity": "sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA==", - "dev": true, - "dependencies": { - "hastscript": "^6.0.0", - "parse-entities": "^2.0.0", - "prismjs": "~1.27.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/character-entities": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", - "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", - "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/character-entities-legacy": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", - "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", - "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/character-reference-invalid": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", - "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", - "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/is-alphabetical": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", - "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", - "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/is-alphanumerical": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", - "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", - "dev": true, - "dependencies": { - "is-alphabetical": "^1.0.0", - "is-decimal": "^1.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/is-decimal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", - "integrity": 
"sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", - "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/is-hexadecimal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", - "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", - "dev": true, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/parse-entities": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", - "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", - "dev": true, - "dependencies": { - "character-entities": "^1.0.0", - "character-entities-legacy": "^1.0.0", - "character-reference-invalid": "^1.0.0", - "is-alphanumerical": "^1.0.0", - "is-decimal": "^1.0.0", - "is-hexadecimal": "^1.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/refractor/node_modules/prismjs": { - "version": "1.27.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz", - "integrity": "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, "node_modules/regenerate": { "version": "1.4.2", "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", @@ -23971,6 +23957,30 @@ "@babel/runtime": "^7.8.4" } }, + "node_modules/regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/regex/-/regex-6.1.0.tgz", + "integrity": "sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==", + "dev": true, + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-recursion": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-6.0.2.tgz", + "integrity": "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==", + "dev": true, + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-utilities": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/regex-utilities/-/regex-utilities-2.3.0.tgz", + "integrity": "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==", + "dev": true + }, "node_modules/regexp.prototype.flags": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.1.tgz", @@ -24739,6 +24749,22 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/shiki": { + "version": "3.22.0", + "resolved": "https://registry.npmjs.org/shiki/-/shiki-3.22.0.tgz", + "integrity": "sha512-LBnhsoYEe0Eou4e1VgJACes+O6S6QC0w71fCSp5Oya79inkwkm15gQ1UF6VtQ8j/taMDh79hAB49WUk8ALQW3g==", + "dev": true, + "dependencies": { + "@shikijs/core": "3.22.0", + "@shikijs/engine-javascript": "3.22.0", + "@shikijs/engine-oniguruma": "3.22.0", + "@shikijs/langs": "3.22.0", + "@shikijs/themes": "3.22.0", + "@shikijs/types": "3.22.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, "node_modules/side-channel": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", diff --git 
a/refact-agent/gui/package.json b/refact-agent/gui/package.json index a3c72f707..9584de34b 100644 --- a/refact-agent/gui/package.json +++ b/refact-agent/gui/package.json @@ -1,6 +1,6 @@ { "name": "refact-chat-js", - "version": "2.0.10-alpha.3", + "version": "7.0.1", "type": "module", "license": "BSD-3-Clause", "files": [ @@ -34,8 +34,12 @@ "build": "tsc && vite build && vite build -c vite.node.config.ts", "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0", "preview": "vite preview", - "test": "vitest", - "test:no-watch": "vitest run", + "test": "vitest --exclude 'src/__tests__/integration/**'", + "test:no-watch": "vitest run --exclude 'src/__tests__/integration/**'", + "test:unit": "vitest run --exclude 'src/__tests__/integration/**'", + "test:integration": "vitest run src/__tests__/integration/", + "test:stress": "vitest run src/__tests__/chatStreamingLargeHistory.stress.test.ts", + "test:all": "vitest run", "test:ui": "vitest --ui", "coverage": "vitest run --coverage", "format:check": "prettier . --check", @@ -60,6 +64,7 @@ "graphql": "^16.11.0", "react-arborist": "^3.4.3", "react-redux": "^9.1.2", + "react-virtuoso": "^4.18.1", "urql": "^4.2.2", "zod": "^3.25.20" }, @@ -75,6 +80,7 @@ "@radix-ui/react-icons": "^1.3.0", "@radix-ui/react-toolbar": "^1.0.4", "@radix-ui/themes": "^3.0.1", + "@shikijs/transformers": "^3.22.0", "@storybook/addon-essentials": "^7.6.4", "@storybook/addon-interactions": "^7.6.4", "@storybook/addon-links": "^7.6.4", @@ -86,6 +92,7 @@ "@testing-library/dom": "^10.1.0", "@testing-library/react": "^16.0.0", "@testing-library/user-event": "^14.5.1", + "@types/cytoscape": "^3.31.0", "@types/debug": "^4.1.12", "@types/diff": "^7.0.1", "@types/js-cookie": "^3.0.6", @@ -93,7 +100,6 @@ "@types/lodash.isequal": "^4.5.8", "@types/react-dom": "^18.2.17", "@types/react-redux": "^7.1.33", - "@types/react-syntax-highlighter": "^15.5.11", "@types/textarea-caret": "^3.0.3", "@types/wicg-file-system-access": "^2023.10.4", "@typescript-eslint/eslint-plugin": "^6.14.0", @@ -102,6 +108,8 @@ "@vitest/coverage-v8": "^3.1.3", "@vitest/ui": "^3.1.3", "classnames": "^2.3.2", + "cytoscape": "^3.33.1", + "cytoscape-fcose": "^2.2.0", "diff": "^7.0.0", "echarts": "^5.4.3", "echarts-for-react": "^3.0.2", @@ -124,15 +132,16 @@ "patch-package": "^8.0.0", "prettier": "3.1.1", "react": "^18.2.0", + "react-cytoscapejs": "^2.0.0", "react-dom": "^18.2.0", "react-dropzone": "^14.2.10", "react-markdown": "^9.0.1", - "react-syntax-highlighter": "^15.5.0", "redux-persist": "^6.0.0", "rehype-katex": "^7.0.0", "remark-breaks": "^4.0.0", "remark-gfm": "^4.0.0", "remark-math": "^6.0.0", + "shiki": "^3.22.0", "storybook": "^7.6.4", "textarea-caret": "^3.1.0", "typescript": "^5.8.3", diff --git a/refact-agent/gui/src/__fixtures__/caps.ts b/refact-agent/gui/src/__fixtures__/caps.ts index ff0e9cf4b..00b026fea 100644 --- a/refact-agent/gui/src/__fixtures__/caps.ts +++ b/refact-agent/gui/src/__fixtures__/caps.ts @@ -180,8 +180,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: true, supports_clicks: false, supports_agent: true, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/gpt-4o-mini": { @@ -195,8 +195,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: true, supports_clicks: false, supports_agent: false, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + 
supports_thinking_budget: false, default_temperature: null, }, "Refact/o1": { @@ -210,8 +210,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { type: "chat", supports_clicks: false, supports_agent: false, - supports_reasoning: "openai", - supports_boost_reasoning: true, + reasoning_effort_options: ["low", "medium", "high"], + supports_thinking_budget: false, default_temperature: null, }, "Refact/o1-mini": { @@ -225,8 +225,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_clicks: false, type: "chat", supports_agent: false, - supports_reasoning: "openai", - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/o3-mini": { @@ -240,8 +240,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: false, supports_clicks: false, supports_agent: true, - supports_reasoning: "openai", - supports_boost_reasoning: true, + reasoning_effort_options: ["low", "medium", "high"], + supports_thinking_budget: false, default_temperature: null, }, "Refact/claude-3-5-sonnet": { @@ -255,8 +255,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: true, supports_clicks: false, supports_agent: true, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/claude-3-5-haiku": { @@ -270,8 +270,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: false, supports_clicks: false, supports_agent: false, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/claude-3-7-sonnet": { @@ -285,8 +285,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: true, supports_clicks: true, supports_agent: true, - supports_reasoning: "anthropic", - supports_boost_reasoning: true, + reasoning_effort_options: null, + supports_thinking_budget: true, default_temperature: null, }, "Refact/groq-llama-3.1-8b": { @@ -300,8 +300,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: false, supports_clicks: false, supports_agent: false, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/groq-llama-3.1-70b": { @@ -315,8 +315,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: false, supports_clicks: false, supports_agent: false, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/gemini-2.0-flash-exp": { @@ -330,8 +330,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: true, supports_clicks: false, supports_agent: false, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/gemini-1.5-flash": { @@ -345,8 +345,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: true, supports_clicks: false, supports_agent: false, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/gemini-1.5-flash-8b": { @@ -360,8 +360,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { 
supports_multimodality: true, supports_clicks: false, supports_agent: false, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/gemini-1.5-pro": { @@ -375,8 +375,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: true, supports_clicks: false, supports_agent: true, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/gemini-2.0-exp-advanced": { @@ -390,8 +390,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: true, supports_clicks: false, supports_agent: true, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/grok-2": { @@ -405,8 +405,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: false, supports_clicks: false, supports_agent: false, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/deepseek-chat": { @@ -420,8 +420,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: false, supports_clicks: false, supports_agent: true, - supports_reasoning: null, - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: null, }, "Refact/deepseek-reasoner": { @@ -435,8 +435,8 @@ export const STUB_CAPS_RESPONSE: CapsResponse = { supports_multimodality: false, supports_clicks: false, supports_agent: false, - supports_reasoning: "deepseek", - supports_boost_reasoning: false, + reasoning_effort_options: null, + supports_thinking_budget: false, default_temperature: 0.6, }, }, diff --git a/refact-agent/gui/src/__fixtures__/chat.ts b/refact-agent/gui/src/__fixtures__/chat.ts index 523352ec1..6459f613c 100644 --- a/refact-agent/gui/src/__fixtures__/chat.ts +++ b/refact-agent/gui/src/__fixtures__/chat.ts @@ -1,9 +1,8 @@ -import type { RootState } from "../app/store"; +import type { ChatThread } from "../features/Chat/Thread/types"; import { ChatHistoryItem } from "../features/History/historySlice"; export * from "./some_chrome_screenshots"; -type ChatThread = RootState["chat"]["thread"]; type ChatMessages = ChatThread["messages"]; export const MARS_ROVER_CHAT: ChatHistoryItem = { @@ -110,43 +109,31 @@ export const CHAT_FUNCTIONS_MESSAGES: ChatMessages = [ // TODO: this might not be correct { role: "tool", - content: { - tool_call_id: "call_WOyQ1sykVGppzWjjUu1drk6L", - content: - "Listing directory .\n 2260 file Cargo.toml\n 1530 file LICENSE\n 224 dir target\n 1198 file mycaps_te3.json\n 416 dir tests\n 152298 file Cargo.lock\n 757 file mycaps_openai.json\n 61 file build.rs\n 1264 file mycaps_gte.json\n 1598 file _video\n 3548 file README.md\n 768 dir examples\n 219 file _backtrace\n 1665 file _video2\n 141 file a.sh\n 139 file _help\n 992 dir src\n", - finish_reason: "call_worked", - tool_failed: false, - }, + tool_call_id: "call_WOyQ1sykVGppzWjjUu1drk6L", + content: + "Listing directory .\n 2260 file Cargo.toml\n 1530 file LICENSE\n 224 dir target\n 1198 file mycaps_te3.json\n 416 dir tests\n 152298 file Cargo.lock\n 757 file mycaps_openai.json\n 61 file build.rs\n 1264 file mycaps_gte.json\n 1598 file _video\n 3548 file README.md\n 768 dir examples\n 219 file 
_backtrace\n 1665 file _video2\n 141 file a.sh\n 139 file _help\n 992 dir src\n", + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_IYK970zyp9vZ36m7emzmNDC9", - content: - 'File README.md:50-99\n``` "temperature": 0.1,\n "max_new_tokens": 20\n }\n}\'\n```\n\nOutput is `[{"code_completion": "\\n return \\"Hello World!\\"\\n"}]`.\n\n[LSP example](examples/lsp_completion.py)\n\n\n## Telemetry\n\nThe flags `--basic-telemetry` and `--snippet-telemetry` control what telemetry is sent. To be clear: without\nthese flags, no telemetry is sent. Those flags are typically controlled from IDE plugin settings.\n\nBasic telemetry means counters and error messages without information about you or your code. It is "compressed"\ninto `.cache/refact/telemetry/compressed` folder, then from time to time it\'s sent and moved\nto `.cache/refact/telemetry/sent` folder.\n\n"Compressed" means similar records are joined together, increasing the counter. "Sent" means the rust binary\ncommunicates with a HTTP endpoint specified in caps (see Caps section below) and sends .json file exactly how\nyou see it in `.cache/refact/telemetry`. The files are human-readable.\n\nWhen using Refact self-hosted server, telemetry goes to the self-hosted server, not to the cloud.\n\n\n## Caps File\n\nThe `--address-url` parameter controls the behavior of this program by a lot. The address is first used\nto construct `$URL/coding_assistant_caps.json` address to fetch the caps file. Furthermore, there are\ncompiled-in caps you can use by magic addresses "Refact" and "HF".\n\nThe caps file describes which models are running, default models for completion and chat,\nwhere to send the telemetry, how to download a\ntokenizer, where is the endpoint to access actual language models. To read more, check out\ncompiled-in caps in [caps.rs](src/caps.rs).\n\n\n## Tests\n\nThe one to run often is [test_edge_cases.py](tests/test_edge_cases.py).\n\nYou can also run [measure_humaneval_fim.py](tests/measure_humaneval_fim.py) for your favorite model.\n\n\n## Credits\n\nThe initial version of this project was written by looking at llm-ls by [@McPatate](https://github.com/McPatate). He\'s a Rust fan who inspired this project!\n```', - finish_reason: "call_worked", - tool_failed: false, - }, + tool_call_id: "call_IYK970zyp9vZ36m7emzmNDC9", + content: + 'File README.md:50-99\n``` "temperature": 0.1,\n "max_new_tokens": 20\n }\n}\'\n```\n\nOutput is `[{"code_completion": "\\n return \\"Hello World!\\"\\n"}]`.\n\n[LSP example](examples/lsp_completion.py)\n\n\n## Telemetry\n\nThe flags `--basic-telemetry` and `--snippet-telemetry` control what telemetry is sent. To be clear: without\nthese flags, no telemetry is sent. Those flags are typically controlled from IDE plugin settings.\n\nBasic telemetry means counters and error messages without information about you or your code. It is "compressed"\ninto `.cache/refact/telemetry/compressed` folder, then from time to time it\'s sent and moved\nto `.cache/refact/telemetry/sent` folder.\n\n"Compressed" means similar records are joined together, increasing the counter. "Sent" means the rust binary\ncommunicates with a HTTP endpoint specified in caps (see Caps section below) and sends .json file exactly how\nyou see it in `.cache/refact/telemetry`. The files are human-readable.\n\nWhen using Refact self-hosted server, telemetry goes to the self-hosted server, not to the cloud.\n\n\n## Caps File\n\nThe `--address-url` parameter controls the behavior of this program by a lot. 
The address is first used\nto construct `$URL/coding_assistant_caps.json` address to fetch the caps file. Furthermore, there are\ncompiled-in caps you can use by magic addresses "Refact" and "HF".\n\nThe caps file describes which models are running, default models for completion and chat,\nwhere to send the telemetry, how to download a\ntokenizer, where is the endpoint to access actual language models. To read more, check out\ncompiled-in caps in [caps.rs](src/caps.rs).\n\n\n## Tests\n\nThe one to run often is [test_edge_cases.py](tests/test_edge_cases.py).\n\nYou can also run [measure_humaneval_fim.py](tests/measure_humaneval_fim.py) for your favorite model.\n\n\n## Credits\n\nThe initial version of this project was written by looking at llm-ls by [@McPatate](https://github.com/McPatate). He\'s a Rust fan who inspired this project!\n```', + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_8jTn7oj8tfctEnqgKQRBJH0w", - content: - 'File Cargo.toml:39-88\n```futures-util = "0.3"\nasync-stream = "0.3.5"\nchrono = "0.4.31"\nregex = "1.9.5"\nasync-trait = "0.1.73"\nsimilar = "2.3.0"\naxum = "0.6.20"\nuuid = { version = "1", features = ["v4"] }\nlazy_static = "1.4.0"\n\nregex-automata = { version = "0.1.10", features = ["transducer"] }\nsorted-vec = "0.8.3"\ntree-sitter = "0.20"\ntree-sitter-cpp = "0.20"\n#tree-sitter-c-sharp = "0.20"\ntree-sitter-java = "0.20"\ntree-sitter-javascript = "0.20"\n#tree-sitter-kotlin = "0.3.1"\ntree-sitter-python = "0.20"\ntree-sitter-rust = "0.20"\ntree-sitter-typescript = "0.20"\n\narrow = "47.0.0"\narrow-array = "47.0.0"\narrow-schema= "47.0.0"\nasync_once= "0.2.6"\nasync-process = "2.0.1"\nitertools = "0.11.0"\nlance = "=0.9.0"\nlance-linalg = "=0.9.0"\nlance-index = "=0.9.0"\nlog = "0.4.20"\nmd5 = "0.7"\nmockito = "0.28.0"\nnotify = { version = "6.1.1", features = ["serde"] }\nparking_lot = { version = "0.12.1", features = ["serde"] }\nrusqlite = { version = "0.30.0", features = ["bundled"] }\ntempfile = "3.8.1"\ntime = "0.3.30"\ntokio-rusqlite = "0.5.0"\nvectordb = "=0.4.0"\nwalkdir = "2.3"\nwhich = "5.0.0"\nstrsim = "0.8.0"\ntypetag = "0.2"\ndyn_partial_eq = "=0.1.2"\nrayon = "1.8.0"\nbacktrace = "0.3.71"\nrand = "0.8.5"\n```', - finish_reason: "call_worked", - tool_failed: false, - }, + tool_call_id: "call_8jTn7oj8tfctEnqgKQRBJH0w", + content: + 'File Cargo.toml:39-88\n```futures-util = "0.3"\nasync-stream = "0.3.5"\nchrono = "0.4.31"\nregex = "1.9.5"\nasync-trait = "0.1.73"\nsimilar = "2.3.0"\naxum = "0.6.20"\nuuid = { version = "1", features = ["v4"] }\nlazy_static = "1.4.0"\n\nregex-automata = { version = "0.1.10", features = ["transducer"] }\nsorted-vec = "0.8.3"\ntree-sitter = "0.20"\ntree-sitter-cpp = "0.20"\n#tree-sitter-c-sharp = "0.20"\ntree-sitter-java = "0.20"\ntree-sitter-javascript = "0.20"\n#tree-sitter-kotlin = "0.3.1"\ntree-sitter-python = "0.20"\ntree-sitter-rust = "0.20"\ntree-sitter-typescript = "0.20"\n\narrow = "47.0.0"\narrow-array = "47.0.0"\narrow-schema= "47.0.0"\nasync_once= "0.2.6"\nasync-process = "2.0.1"\nitertools = "0.11.0"\nlance = "=0.9.0"\nlance-linalg = "=0.9.0"\nlance-index = "=0.9.0"\nlog = "0.4.20"\nmd5 = "0.7"\nmockito = "0.28.0"\nnotify = { version = "6.1.1", features = ["serde"] }\nparking_lot = { version = "0.12.1", features = ["serde"] }\nrusqlite = { version = "0.30.0", features = ["bundled"] }\ntempfile = "3.8.1"\ntime = "0.3.30"\ntokio-rusqlite = "0.5.0"\nvectordb = "=0.4.0"\nwalkdir = "2.3"\nwhich = "5.0.0"\nstrsim = "0.8.0"\ntypetag = "0.2"\ndyn_partial_eq = "=0.1.2"\nrayon = 
"1.8.0"\nbacktrace = "0.3.71"\nrand = "0.8.5"\n```', + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_Ql7xrkn5BqtjVSHHAnNksFis", - content: - 'File Cargo.lock:6265-6314\n```]\n\n[[package]]\nname = "zstd"\nversion = "0.11.2+zstd.1.5.2"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"\ndependencies = [\n "zstd-safe 5.0.2+zstd.1.5.2",\n]\n\n[[package]]\nname = "zstd"\nversion = "0.12.4"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c"\ndependencies = [\n "zstd-safe 6.0.6",\n]\n\n[[package]]\nname = "zstd-safe"\nversion = "5.0.2+zstd.1.5.2"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db"\ndependencies = [\n "libc",\n "zstd-sys",\n]\n\n[[package]]\nname = "zstd-safe"\nversion = "6.0.6"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581"\ndependencies = [\n "libc",\n "zstd-sys",\n]\n\n[[package]]\nname = "zstd-sys"\nversion = "2.0.9+zstd.1.5.5"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656"\ndependencies = [\n "cc",\n "pkg-config",\n]\n```', - finish_reason: "call_worked", - tool_failed: false, - }, + tool_call_id: "call_Ql7xrkn5BqtjVSHHAnNksFis", + content: + 'File Cargo.lock:6265-6314\n```]\n\n[[package]]\nname = "zstd"\nversion = "0.11.2+zstd.1.5.2"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"\ndependencies = [\n "zstd-safe 5.0.2+zstd.1.5.2",\n]\n\n[[package]]\nname = "zstd"\nversion = "0.12.4"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c"\ndependencies = [\n "zstd-safe 6.0.6",\n]\n\n[[package]]\nname = "zstd-safe"\nversion = "5.0.2+zstd.1.5.2"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db"\ndependencies = [\n "libc",\n "zstd-sys",\n]\n\n[[package]]\nname = "zstd-safe"\nversion = "6.0.6"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581"\ndependencies = [\n "libc",\n "zstd-sys",\n]\n\n[[package]]\nname = "zstd-sys"\nversion = "2.0.9+zstd.1.5.5"\nsource = "registry+https://github.com/rust-lang/crates.io-index"\nchecksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656"\ndependencies = [\n "cc",\n "pkg-config",\n]\n```', + tool_failed: false, }, { role: "assistant", @@ -159,53 +146,38 @@ export const CHAT_FUNCTIONS_MESSAGES: ChatMessages = [ }, { role: "tool", - content: { - tool_call_id: "call_KKgK2Ki3bpvCk2jCtDhQxfpw", - content: - "Listing directory tests\n 2438 file test_hf_endpoint.py\n 3021 file lsp_connect.py\n 678 file lsp_completion.py\n 0 file __init__.py\n 96 dir __pycache__/\n 1340 file test_at_completion.py\n 3926 file post_doc_info.py\n 224 dir emergency_frog_situation/\n 4107 file test_edge_cases.py\n 2081 file code_completion_with_rag.py\n 2866 file measure_humaneval_fim.py\n", - finish_reason: "call_worked", - tool_failed: false, - }, + tool_call_id: 
"call_KKgK2Ki3bpvCk2jCtDhQxfpw", + content: + "Listing directory tests\n 2438 file test_hf_endpoint.py\n 3021 file lsp_connect.py\n 678 file lsp_completion.py\n 0 file __init__.py\n 96 dir __pycache__/\n 1340 file test_at_completion.py\n 3926 file post_doc_info.py\n 224 dir emergency_frog_situation/\n 4107 file test_edge_cases.py\n 2081 file code_completion_with_rag.py\n 2866 file measure_humaneval_fim.py\n", + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_dT0OdIL7JLYJ7Fxk72MvmuRA", - content: - "ERROR: [Errno 21] Is a directory: './tests/emergency_frog_situation'", - finish_reason: "call_failed", - tool_failed: false, - }, + tool_call_id: "call_dT0OdIL7JLYJ7Fxk72MvmuRA", + content: + "ERROR: [Errno 21] Is a directory: './tests/emergency_frog_situation'", + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_C5uTWek5PUKmaTfe7u0TypZL", - content: - "ERROR: [Errno 21] Is a directory: './tests/emergency_frog_situation'", - finish_reason: "call_failed", - tool_failed: false, - }, + tool_call_id: "call_C5uTWek5PUKmaTfe7u0TypZL", + content: + "ERROR: [Errno 21] Is a directory: './tests/emergency_frog_situation'", + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_9vYcfrFCiUhy8g6bNn0WJn5p", - content: - "ERROR: [Errno 21] Is a directory: './tests/emergency_frog_situation'", - finish_reason: "call_failed", - tool_failed: false, - }, + tool_call_id: "call_9vYcfrFCiUhy8g6bNn0WJn5p", + content: + "ERROR: [Errno 21] Is a directory: './tests/emergency_frog_situation'", + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_6Cg6UfAvNTgEt96EQrHz16W6", - content: - "ERROR: [Errno 21] Is a directory: './tests/emergency_frog_situation'", - finish_reason: "call_failed", - tool_failed: false, - }, + tool_call_id: "call_6Cg6UfAvNTgEt96EQrHz16W6", + content: + "ERROR: [Errno 21] Is a directory: './tests/emergency_frog_situation'", + tool_failed: false, }, { role: "assistant", @@ -215,13 +187,10 @@ export const CHAT_FUNCTIONS_MESSAGES: ChatMessages = [ { role: "user", content: "use ls, don't be stupid" }, { role: "tool", - content: { - tool_call_id: "call_UoHvkwbPq6LMAKRM0iblVkSB", - content: - "Listing directory tests/emergency_frog_situation\n 1516 file jump_to_conclusions.py\n 695 file set_as_avatar.py\n 96 dir __pycache__/\n 777 file frog.py\n 249 file work_day.py\n", - finish_reason: "call_worked", - tool_failed: false, - }, + tool_call_id: "call_UoHvkwbPq6LMAKRM0iblVkSB", + content: + "Listing directory tests/emergency_frog_situation\n 1516 file jump_to_conclusions.py\n 695 file set_as_avatar.py\n 96 dir __pycache__/\n 777 file frog.py\n 249 file work_day.py\n", + tool_failed: false, }, { role: "assistant", @@ -246,13 +215,10 @@ export const CHAT_FUNCTIONS_MESSAGES: ChatMessages = [ }, { role: "tool", - content: { - tool_call_id: "call_spx7e7LMfw97BmmzojQQf0rO", - content: - "File tests/emergency_frog_situation/frog.py:1-29\n```import numpy as np\n\nDT = 0.01\n\nclass Frog:\n def __init__(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n def bounce_off_banks(self, pond_width, pond_height):\n if self.x < 0:\n self.vx = np.abs(self.vx)\n elif self.x > pond_width:\n self.vx = -np.abs(self.vx)\n if self.y < 0:\n self.vy = np.abs(self.vy)\n elif self.y > pond_height:\n self.vy = -np.abs(self.vy)\n\n def jump(self, pond_width, pond_height):\n self.x += self.vx * DT\n self.y += self.vy * DT\n self.bounce_off_banks(pond_width, pond_height)\n self.x = np.clip(self.x, 0, pond_width)\n self.y 
= np.clip(self.y, 0, pond_height)\n\n```", - finish_reason: "call_worked", - tool_failed: false, - }, + tool_call_id: "call_spx7e7LMfw97BmmzojQQf0rO", + content: + "File tests/emergency_frog_situation/frog.py:1-29\n```import numpy as np\n\nDT = 0.01\n\nclass Frog:\n def __init__(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n def bounce_off_banks(self, pond_width, pond_height):\n if self.x < 0:\n self.vx = np.abs(self.vx)\n elif self.x > pond_width:\n self.vx = -np.abs(self.vx)\n if self.y < 0:\n self.vy = np.abs(self.vy)\n elif self.y > pond_height:\n self.vy = -np.abs(self.vy)\n\n def jump(self, pond_width, pond_height):\n self.x += self.vx * DT\n self.y += self.vy * DT\n self.bounce_off_banks(pond_width, pond_height)\n self.x = np.clip(self.x, 0, pond_width)\n self.y = np.clip(self.y, 0, pond_height)\n\n```", + tool_failed: false, }, { role: "assistant", @@ -295,22 +261,17 @@ export const FROG_CHAT: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_NSSpdvLovaH50zZUug463YRI", - content: - "attached file: /Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py", - tool_failed: false, - }, + tool_call_id: "call_NSSpdvLovaH50zZUug463YRI", + content: + "attached file: /Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py", + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_cmTkaNJ0roopnMcNfG4raxny", - content: - "attached file: /Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py", - - tool_failed: false, - }, + tool_call_id: "call_cmTkaNJ0roopnMcNfG4raxny", + content: + "attached file: /Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py", + tool_failed: false, }, { role: "context_file", @@ -344,13 +305,10 @@ export const FROG_CHAT: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_8ER9PVREdkt37h84LZyc97c9", - content: - "attached file: /Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py", - - tool_failed: false, - }, + tool_call_id: "call_8ER9PVREdkt37h84LZyc97c9", + content: + "attached file: /Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py", + tool_failed: false, }, { role: "context_file", @@ -385,13 +343,10 @@ export const FROG_CHAT: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_1bHhD3bVIzvOueSDq1otYX4i", - content: - "attached file: /Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py", - - tool_failed: false, - }, + tool_call_id: "call_1bHhD3bVIzvOueSDq1otYX4i", + content: + "attached file: /Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py", + tool_failed: false, }, { role: "context_file", @@ -513,11 +468,9 @@ export const CHAT_WITH_DIFF_ACTIONS: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_n5qeQaFZNAoaP3qJzRiGO6Js", - content: "performed vecdb search, results below", - tool_failed: false, - }, + tool_call_id: "call_n5qeQaFZNAoaP3qJzRiGO6Js", + content: "performed vecdb search, results below", + tool_failed: false, }, { role: "context_file", @@ -634,13 +587,10 @@ export const LARGE_DIFF: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_b0ZalvpaQCZLGIHS0t4O3tH3", - content: - " \n Users\n marc\n Projects\n refact-lsp\n tests\n emergency_frog_situation\n frog.py\n holiday.py\n jump_to_conclusions.py\n set_as_avatar.py\n work_day.py\n", - - tool_failed: false, - }, + tool_call_id: "call_b0ZalvpaQCZLGIHS0t4O3tH3", + content: + " \n Users\n marc\n Projects\n refact-lsp\n tests\n 
emergency_frog_situation\n frog.py\n holiday.py\n jump_to_conclusions.py\n set_as_avatar.py\n work_day.py\n", + tool_failed: false, }, { role: "assistant", @@ -659,12 +609,9 @@ export const LARGE_DIFF: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_YozL4pz5zNwdEaNWhdVQdcIF", - content: "performed vecdb search, results below", - - tool_failed: false, - }, + tool_call_id: "call_YozL4pz5zNwdEaNWhdVQdcIF", + content: "performed vecdb search, results below", + tool_failed: false, }, { role: "context_file", @@ -903,13 +850,10 @@ export const TOOL_IMAGE_STUB: ChatMessages = [ }, { role: "tool", - content: { - tool_call_id: "a", - content: - "Opened new tab new\n\nChrome tab navigated to https://www.wikipedia.org/", - - tool_failed: false, - }, + tool_call_id: "a", + content: + "Opened new tab new\n\nChrome tab navigated to https://www.wikipedia.org/", + tool_failed: false, }, { role: "assistant", @@ -936,17 +880,15 @@ export const TOOL_IMAGE_STUB: ChatMessages = [ // }, { role: "tool", - content: { - tool_call_id: "b", - content: [ - { - m_type: "image/jpeg", - m_content: - "/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAJABQADASIAAhEBAxEB/8QAHAABAAEFAQEAAAAAAAAAAAAAAAYCAwQFBwEI/8QAXRAAAQMDAgMEBgMIDQgIBAcBAAECAwQFEQYhEjFBBxNRYRQiMnGBkRWhsRYjM0JScsHRCDZWYnN0gpKUstLh8BckNDU3U5WzJUNUdZOiwvFEVWOkJjhXZaO00+L/xAAZAQEBAQEBAQAAAAAAAAAAAAAAAQMCBAX/xAA0EQEAAgIBAwIEAwYHAQEAAAAAAQIDESEEEjETQVFhcZEUIoEFFTIzobEjNEJSwdHh8PH/2gAMAwEAAhEDEQA/AO/gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
44m0W+DHloKOaOGOSlhcyFyPiarEwxyclTwwQftBtF2ud4s77ZTyOdCqr3yJlsblc3Cr7sZOgAmHPbFeLxzpcmOL17XM7fpvVEWtqe5XZGVaMie11TE5vDvG5ETGEXmqdOpqdN6b1rTRVCW+Rba1zk42z+rxrvunqqdiB6f3hfUx2x4iPHw+X6svw1fjLkrtPaqTWdpqrq19b3UsSuqIk4msYj84VcJy3X4nWgDDP1E5tbiI18GmPFFN6nyAA87QAAAAAAAAAAAAAAAAAAAAAAAqoiKq8kAGqq3tlqOJuVRE4Sp1bNNxt4EYxVwniqeZjKrldwswmObl6AYF8p7hU2eogtk0UFTI3hSaVyokaLzcmEXfHIjnZstwfbKlJalktsgk9HouCFGJIjVXik8VyvivPJNFpkWLL3q9r8tcx6bKmCpjGRsRjGo1rUwjWphEA9NbeKttPTKmd8ZVPsQ2T3JGxz3bIiZUhd4q3VNUrE3wu6J4+AGbpSkfV3r0l27YUV7l/fLsn6V+BPjVaftn0ZbGMemJpPXk8l8Ph+s2oAAAAAAAAA11ZErpFd3krUzjhbIqIuyKbExatNlVeW2PfnH6QNe2CJi5axqL443KlQrPWxuflUTZOaryAsqhi1lbR0DEfWVUFOxy4R00iMRV+JsHwua3i2Vvi1cml1FYKTUNonoqmNivcxUilVuVjd0VF6b494EO19U6bvNrdS+mtqLnG1VpGUrnSu4/BUblN8Y3KNLS6wp9O0tviskECwo5EqK6ZW5RVVU9RE4tslnssuTaZ1dpyrhbFXU8jnovCiK5EXDkVeqov1L5HSVQDj1Td9V2HXNNb6u6tnWrliVzeHMXC92MI1fZxvyxyOsqhzLVsX0l2uWamp95Imw95jpwvdIv/AJSe6gusdktE1Y5vHInqQxpuski7Naidcr+kDAs2qbdfa2po6VtQyemz3jZY8ImFxzRVTn5m5VCD9l8TI7bdGyxuZcW1atqUevrbJtlOm/F9ZOlQC0qFCoXVQxa6R0FHLIz2kbt7yotS1VPE/gfMxrvBXci3PPAkD3OkYrML1zkrhttPBEjXRMkkx673tRyqvXmW0ttIyTjSBvEi53zj5cgLjeNaanWT2+6bxe8ochedlVVV3VS2qARrV2n0v9oVkaIlXDl8Cr1Xq34/bgiOiL+2me+wXNOGNzlbEkiey5ebFRfH7c+J1BUINq/RTrpMtwtvCyrX8JGq4STzRei/aFaashn0JqFKmBHPtVUuHM8E/J96dPL4m1sVtSHVdVW2xU+iZoUdlPZVy4XDfHG/uzgvWaivtyo20WoqWFaSFWqjpMOkkVq7JsuPevVPfklfCjURrURETZEToBaVC25C8qFtyBEw0zfVq2pQ1Lvv7E+9uX8dE6e9CSnJlm9HkbI2Tge1eJqou6KbOjutZUXiS5ySOijWNMcblRuUxnhTw57eYV0YGLbq+K5UMdVCvqvTdOrV6oplEAAAAAAAAAAAAAAAAAAAAAAIhq3Q/wB1FwgqvpH0buou74e4487quc8SeJLwaYst8Vu6k6lzelbxqzmP+SD/APff/tP/APslFs0ZS0elprDWTrVwyvV6vazu1RdsY3XdFQkwNsnW58katb+zOuDHWdxDmi9lVREr46a/yR08mz2LCu6eC4dhfqJHZ9JUWl7NXJA901TLC5JJ3phVREXCInRCUFL2Nkjcx6Za5FRU8UF+szZI7b24K4KVncQ4bovTVNqeavpZ5XwvjiR8UrUzwrnG6dUJhaeyqmpK9lRX1/pcUbuJIWxcCOX98uV28iYWvTtpsssktuo2QPkbwuVHOXKfFTaG/UftHJe0+nMxWWePpaxEd0blGdWaMpdUNhkdO6mqok4Wyo3iRW+CplPtI7H2TsdSPZUXiR82ESJyQ+rGmcrtxb+HNOZ0gHnx9Znx1ilbcNbYMdp3MIlBojuNF1OnfpDi7+RH+kdzjh9Zq44eL9749TP0npv7l7XLRel+k95MsvH3fBjLWpjGV/J+s3wOLdRktWazPEzufqsYqxMTEeEOpNCei6ydqD6S4szyS9x3GPaRUxxcXTPgNUdn1HqCrWthqFpKtyJxuRnE1+OqplN/MmIOo6vNFov3cxGv0T0aa7dcOb27snhjqmy3K4uqI2rlYo4+Hi8ldnl7vmSbVWlWaktlNRMqUo2QSI9vDFxJhGqmMZTHMkQLbq81rxebcx4IwY4rNYjy11htX0JZKW3d933cNVO84eHiyqryyuOZsQDz2tNpm0+ZaRERGoAARQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAh2ou0a1WKp9FiY6uqGriVsLkRsfkrvHy+eAJiAAAIjbdf0V11V9C0tM97Fc9rarjThdwtVVVE8Njbah1HSaco45qhkkssz+7ggiTL5HeCAbgEOh13JBW08F6sVZa4ql/BFPIvE3K8kXZMfWTEACIv19RfdbHYYKZ8yulSFahHojUf1RE645EuAAjmodWx2Wtp7bS0M1wudQnEymiXGG+Krhccl6dF5FNh1e263Oa1V1vmttzjbx9xK7iR7fFrsJn5fPcCSgj2otVxWOppqGCjlr7lU7xUsS4VU8VXC4TZenRSzY9YJcrs+0XG3TWy5I3jZDK7iSRvi12Ez1+XvAk4I/qPVUNgkpqWOllrbhUr95pYtlcniq74T4fpMey6xWuvC2e6Wya13FzeOOOR/G2RPJ2E8F+S7gSgAAACDagvs8V6mhp9Q1dC2JEa6Flp79OLGVVH435gTkEKtVReL3RuioNTSLPBJxSzT2pI+Jrk9VqNXHJWuXKeJZv7tV2C1LcH6ihqGtkYxY0oWNzxOROeV8QJ2DRajrLzb4PS7fJa46SJiuqH1qSKqeGODoc+1Bra6VdjqIW3Wz5dw4WhbUsm9pF9VXIiJ5+WRpNuvA5ouvrk1MrddNYT/wClVf2SeWn6W9Ed9M+hek8a8PofHwcGExni3znP1BWeAAAAAAAAAAAAAAAAYdXOit7tjs59pUKa+bCNhaq5dzx0QwWU6NYjUnkTCY9nP6QKnORjFd4FVPGuGtdzXdy/aUpC1HIquc9U6u/UXWqrVyi4UCiSpY5+GZd0RrUyeJ3zuTGsTxev6ELv1J4JsANZdJ201M/vZFevDlGtTCfM0ul6Bbhd+/kTijg++OVeruifp+Bf1I93rN6Zanwxk32k6ZILGyTHrTOV6/PCfZ9YG8AAAAAAW554aaF01RKyKJiZc+RyNanvVSL1faTpSjkdG+6JI5q4XuonuT5omF+CgSwGBaL1bb9RJV2yrZUwZ4Vc3KK1fBUXdF95ngCxU8CMRz3I1PZ4l5Jn+/BfLVVTR1dNJTypmN6YXAGDL3dOxZKiaOJiJnKrz9xopFqdTVKwUq9zQQ83uRfWX9K+RarLRYrXKvp924cYXukxx49yZX6iiXXNtt1MkFvpeCNqbOmdwpnxxuq/NAM9lqrNPu9JgnWopU/DRcOFx1VEz0No5GKjXxqixvRHMVPBTndw7RJ5uJrJHuT8iJO7b8
+ZNLC6Z+nqF1RGscjmK7gXm1qrsB79EW9tx+kG0UDa3Cos6Roj1RfFepdnljp4JJpnoyKNqve5eSIiZVTJNdc7al0aynqHZo88UsSf9bjk1V/J6qnXbplFCE6Itc1zvVw1hWxqxatzm0bHpukfLi+SIifHxJY61NnuTa6rckz4cpTR4w2LPNcdXL49OSY3ztEY1jUa1qNaiYRETCIhSqAc/rmP092lUtXDG9KK8NSGfDV4Ul5Ivhnl83E3VC8qFtyAWVQtyxtljdG9MtcmFQvqhQqFRhwvdlYJVzKxPa/Lb4+/xKnJuVVEKyI1zF4ZWLljvBfD3KURyJNHxcPC5F4XsX8VfAClUKFQuqhbVALSoWnIXZHNY1XOVETxUtMinq28ceIYOssnX3J1AtuwnMoVC+tPRR7JG6d/5cjlRPkhjOakcjeFOFj8ojcquFT3+8DxUPFpUWNsk8zmMfnhZGm6p7ypSuGRqIsUv4Jy8/yV8QLDVp4fwFMzi/Lk9df1GNVSS1G8j1VU3TPQyJo1ilcx3Nq4Md6AbPS17+ja7upnYpplw/K+w7ov6/7jpBxqVOF3F06nQtIXlLnbVp5HZnpvUXK7ub0X9BFSMAAAAAAAAAAADUX7Ulu07SpNXS+u78FAzeSRfJP08gNuDS6X1FHqe0ur46d0CJK6Pgc7iXZEXOfiZt2utJZbbNX1snBBEm+Eyqr0RE6qoGaCCJ2h1ccDK+r0xXQWp6piq4+LDV5OVvCmy5TqS991omWhbqs7fQki77vURccGM5xz+AGYCDJ2hVL6da+HTFxfa0yvpOUReFOvDjl55JZQXaiuVpjulPMi0j2K/jdtwomc58MYXPuAzQQdO0CrrO9qLRpqtrrfE5UWpR3DxY5q1vCuft9xJrFfKPUNrZX0Tnd25Va5rkw5jk5tXzA2QNde71R2C1y3CtcqRMwiNamXPcvJqeZGY+0GanlppLzp+qttDUuRsdU9/EiZ5cSYTG2/iBNwYF4vFJY7VNcax6pDGnJqZVyryRPNSKs7Q5oPRqi66eq6G21LkSOrV/Em/JVbwpjbfny5ZAnIMK6XaktFqmuVVJinibxZburs8kTxVVVCJN7RJ4WQVlw07WUlqnciMrFfxbLyVW4TZefP3ZAnQKY3sljbJG5HMciOa5FyiovUqAAAAAAANde71R2C3LXVyvSFHI31G8S5XlsXLpcEttoqK7uny91GrmxsRVV69E28VwBmg1OnJrtU2WGpvLYo6ub1+7jYreBq8kXKrv1X346G2AAAADRWy/yV+qLvaXQMYygSPhkR27+JM7ob0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHOO0OzUFn0S9lDTti72sY+R3Nz3LxLlV68zo5Du0yiq6/SiQ0dNNUS+kMdwQxq92MLvhBBKYkR1tdajgp9O2t3/SdzXgyi/govxnL4bZ+CL4Gyv8AqL6Eq7XTMpFqZrhUJCxvecPDuiKvJc80MK7aHp7rfH3ZLpcKSpexGZppEbhETGEXGQNFHaaaydpGm7fSpiOGgkTK83LiTLl81XcnFfQUs8kVdLRtqamjRz6fPNHY6ea4Q53WaJrG63t8DbjepaZ1O5X16vVXRLh3qo/GEztt5+ZK9Rvv9sfb620Nkraanyyro0wr5UxhHIuM558vLbmVES1Leau/VNtt19tkljtnpKSPqJ0c/jciKiNRUREbzX7em8v1nfZbVbI6Wgy+6V7u4pWN5oq7K74Z+aoR3UNyuetKBllt+n6+mSWRqzVFdF3bI+Fc7L/hfLc3120PTXato6x1yrqaopadsDH070auEzvnGcrlQI3V2GHTt40RQxqjpPSJnzSf7yRe7yv6E8kQ6acuvuiayK/WOOG5XusiklektQ6RXrTJ6uFRyJ6ud+fgS6a8rZLrZNOxxS1stTGrVnll9ZrWpu523rKqIq9OQGpsTG1PalqOpkaneQRRRRovNEVqZVP5v1jViNpdfaTrImJ30skkL1RN1b6qfVxu+ZReYrhprWztRU1FPW2+thSKrZTt4nsVEREVE/kpv7022PLelw1ZrWlvUtBUUVqtzHJA2pZwvke5OePl4p6qeIFy1tbU9rt6llRFdTUkbIkdzRFRiqqfNfmNao2n1fpKsjREmdVrC5UTdzVVqfJOJfmeX6C4ae1pHqWjopq2jqIe4rIoG8T24xhUT4N+SptktUy1+sdZUFzfb6mitFsRXx+lM4HySL4J70b4p6vmBdpEbVdsdeszUVaS3tSHPTPBlU/nuT4nuvGNhvulq1iJ37a9se3NzVVuU/x4nuo6a42TV9Nqego5aymdD6PWQwty/hzzRPl/N35mMx9drXV1srEt1VRWi2L3yOqmcDpJNlTCe9G+7C+KIBIbxq+xWuWpoqu4tiqo2etHwOVUy3Kck8FQjeiNZ2Wh0tR0lxuiNrGufxtka9y7vVU3x4KhNa600FYyZ81vpZpnsVOJ8LXOXbCbqhH9E6chpNK0kdztUDa1rnq/voWq/wBtcZXHhgCXkN1M+pbd8RLqhG923/VkbFi6+PXxJkc+1LNRJrxIblNXpS/RrXNZSOkzx945MqjPIkLJLdLtQ2mgjop7nFPWXVtNx3iFqyI1zU3RE/Fz9eTOummtS3iiWjrb7RugV7XqjaThVeFUVN8+RFGVkcVogrXTVL6Cj1Qju8m43ujhRqYznfZOn6SU3ntCsbrPVx2u4ulr5InMp2RQv4uNUwipluNlXPwKjcatkjdYqiiWPvn1LeBYmVDInq1eaor9tjmuoVr2aanikddEgajG8M10p5WIiObjLGJxL05HQK6wVF3slvfUwW+W7MijSaWtp+8T2fXRETGPWIHqy2Q0FqrIHz6bbVxqxFgpabu6jKuavq+tnkueXLIglfu76qS1VDbit6kpOFHSMdd6V6KiKipsjcruicjqdDWw3CkjngkY5HNRVRr0dwqqZwuOu5ArjoO5zW6eOGn0+kjmKje6oljfnydnZSbWa1U1ot0UFPSw07la1ZUibhHPwiKvnyBDYAAigAAAAAAAAAAFL3pGxXryRCowLhLxK2nTru73f4+0DFRyyPdK7m7l7ioxq+uht1G+pn4la3CI1jeJz3LsjWp1VV2RDB07da28UtTPW25aHgqHRRxufxKrUxuuNs5ynwA3BE9Z6srNOtjZbqFKyZrFnqEVFxFEi4Ry45ZX7FNpRako7hf6m1UyPkWni43zp7GUdwq1F6qnX5EQo86yulVDE7NLU1DZq6ROSU0e0MOfF+FevgjgOgWypkrrVR1csXcyTwMldHnPArmoqp8MmWERERERMInJABi1dvgrPwiLvsuOpRJQsioFiY5/DG3LUV2yY35GcWayRIqSRVXdU4U96gYFivUrrm+11LuNMcUL154xnhXx6/IkxzuzvWo1lA6Ndkcu/kjVydEAGuvN1baqRH8KPmftG1eXvXyQ2JpNVWZ15ss8UMrYqlsbu6e5cImU3RfBPPyA49qnUU16rXRpM+oRq88+o33JyI56Cjt3qmfJDISJ9LI6m
mjWOZi7ovXzRepWBIOzu5QWDU8iz18FLRS07u/7+RGI5UX1ceLsr8lUn9d2raTo2/e6yWrfnHBTwuz83YT6zis9C2eRXq7n0VMnkdvjYuVXK+SYA6NXdsk0yq21WlrG52kqnquU/NbjH85SN12ttQXRHNnr5UjXbu4fvbcL0XG6/HJpWwsamEbn3lzkBIdL2Cs1JUTItR6NTQoiySNbxOVV5Innz9xNGdnVjb+EWsmd+U+dM/U0s9mSNSyV6/jekNRfkn95NFAjtv0XZLdUNnZTOmkauWd/Jxo1fHGET5m/VVcuVXK+JUeKgFJamlZCzjkcjULxjU7EkklqXpxPZIsbEXkzHX3qBaVKyowrGtp4l5Pk9pfchStBGv4WoqJV8nI1PluZq5VcrupQqAYnoNM3dvpDfNJf7i0rXwzsZxufHI1Vbxe0ip59eZmqhYk9etenSFqRonmu6/WBQqFCoXXIWJZoovbe1vlncDxUMWeNzJO/iTL0TDm/lt8Pf4Fxs0tSuKSmkl/fYw35nq0UrkzVVbY0/wB3CmV+ZUY7p4UibLxpwO5L193vLbPSapM08Coz/eybNQvSxxUiNkpafi4HZfx+s5yeKJyyVvndUNbJ3ivY5MtXpgCw2lghdxzO9KmTki7MT4dTyeV8y5eucck6IVOQtuQCyqFqRnGzhzhUXLV8FLyoUOAx8T4/0aR3nGnEn1HqU0z8LO3uIeqv9pyeCIVrsUO3ApqZO+nfJjGV5GO5Nii41PoVuqqrGe5idIieOEyc0sWrq6G8NWvqny0078SI9dmZ6p4Inh4AdGehds1c6zXSOqYq8CLiRvixeafpKXoY70A7Gx7ZGNexyOa5EVFTqhURfRN0SqtrqGR+ZaVcNTxYvL5cvkSgigAAAAAAABgS2agqLvFdJqdslZFH3cb3b8CZVconLO/PmZ4Ag3ZT+1KX+OSfY0ye0q21Ny0ovosbpXU07Z3RtTKuaiKi/wBbPwLHZ9T11p0ZV9/QVDalk0sjKeRisdJ6qYRMp1VMZM5LlqO66PdW0dAtvuyOVW087fbRF39rGMpyz4eeS+6NXdNfWK56ZqKaifJLW1dO6GOkSFyuR7m4wu2MJnx6bGDpttPXdj9TT3Kq9GpWrI3vlTPAiORybdfWXl15Fyq1NWVtDLTW/SVbT32oZ3b5XUyNazKYV3Gu/jjOENhWaMqU7NmWClkYtWxEkXfDXv4uJyZ8N1RM+CAR+k1fqGn0clPBp6Sanig7mOvRjkYsaJhHcGN9k55wZsiU1u7FqhLdWJUtcxEfKiKm75ERyYXdNlVPr6mdTazulLbI6OTSV0W4RRoxGMhXunKiYznGyfBfeV6f0bUx6BrbPcXNjnrnul4W7pC5Ubwpt4K1F2AwLBWavfpukkslst0NvgiRsUdQq95UYT1nbKiJxLlenPmpJdGXWju9nkqKa3xUE7ZlZVQxxo376iJlduecpz36dCPWvVN107Z47PcNOXCWupW91C6CPijlRPZ9ZPgm2f0GXp6iu+m9LXS6z0Lqm6Vkq1K0cfNMrywmd91XCe7mA7QEbUXbS1BK1HU89wRZEdyXCtTC+9HKbXXtPFU6JuaSI31I0kaq9HIqKmPs+JrtS0N11DpW23OmpHU12pJGVbaZ/tIqc279eS4XwxzNXer7ddYWtlit9jr6Weoc1KuSpi4Y4moqKuF96dcLhOW4Fu/yLcNM6IpKhqrFVT0/eq7kvqo3f3o5VJdrSmiqNGXVkiN4WU7pG5Tkrd0+tDA1Zpuoq9J0dLa1zVWx0clOi4y7gbjHvxv70NJd9RXfVNnbY6Gw19NXVPCyqkniVkUSfjYd4e9E28VAxr7K6u7P9JU0zVbHUTwRyKvgjVbv7+fwJxqylhn0ddYXsb3bKSRzUxsitarm/JUQ1ep9LzVeiaa2W93FU29InwZwnG5jeH5qir8TS3PU151DY/oOksFwhudS1Iql8sPDFGn4yoq9F88c+oG407qKgtehbNUXWpSma+LumK5qrnhyickXohpKDWVnj7QLrWS3TFvlpo2wuVHq1XIjc4TG3UnFBZKSmslFbaiCGpbSxNYiyRo5FVEwq4XlkjtBpqNnaBdamW0wfRz6aNIVdC3g4sNzhPHZQJVbrlR3ajbWUM6TU7lVEeiKmVRcLzNVq2Kvmt0TKS7wWmBZE9JqpH8LkZ4NXovxTlzN5BTw0sSRU8McUacmRtRqJ8EIT2gW6sqK2zVzbfLcqCklctRSRJlVzjC4TnyX/CkVpYrm2xaps8Vr1TPd6esnSCogmm73h4lROJF5JuufHbrubK9NvFy7R3Wihu9TRUz6Jr5e7evqtzurUzhHKuEz4Kpq62Ge636wVNs0lPbaCnro1fJ6Ikb3es1VVUamzUROa7b+RKI6OqTtVlrFppvRVtvAk3AvBxcSbcXLPkVEf7QNPutuj4X/AEtcahIHoxWTTcTZOJyrxOTqqZwi+CG21HSVWnOzy4LT3e5TT8cb21E1QqyMy9iKjXJhUTnt5qbDtBtdXdtJT09FE6adr2SJG3m5EXfHwXJq77WV2pOzeuRtnr6erR0Ufo0kLuNyo9iqrUxlU59OigXNR3a5vgsFjtlSsNZdGIslTlVfGxGoqqnmu+/l55Kk0zqGyVtJVWq91dwj40Sqpq6bKOb1VqryX6/eU6jtNzZBYb5bKZZqy1sTvKbCo+RitRFRPNN9vPywVN1Pf71W0lLarHV0EfGi1VTXQ4axvVG+K/X5AZFNWVTu1Wso1qZlpW21HpCr14EdxM34eWd13PLvWVUfaVp+kjqZmU0sMyyQteqMeqMdjKcl5IYl8bX2HXceoILbU11FPSejzJTN43sVFznHwb9ZiQz3a99o1mukllraOgijlYx00aoqeo/d/RuVVERF8PMDOsEjYu0PVkj1wxjIXOXwRGmvstvu2t6ea91d8rqCGWRzaWno5Fa1iNXGV8d/s59Db2W31Ca61PLUUsraWobE1kj2KjJE4cLheS/A1FluF20RTTWSssldXwxSOdS1FHGrmvRy5wvhv9vLqBsdM3m7Mpr7aa13plxtKL3Mn406Kjlbnz2T5oRqzTLfretTJraspL+57sU8s/dxNXOzeHqi7cvHlsSfStsvEUV6vtVTxw3O5LxwU0ucRo1F4Ud1TOU+CfA0N3qVvVsqKa5aHrPp17XMbPBTYYjuSO7xN1ROfVPMDpNvbVst9O2vfHJVtYiSvj9lzuqpsnMyTU6Yoau26ZoKOufx1MUXC/fON9m58kwnwNsRQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAamuscdffrZdJZl/zBJOCLh2c56Yyq+WDbAAAAAAAA1L7FFJqqO+vlVXxUq07IuHZuXKquz44VUNsAAAAAAAAAAAAFlKWnSrWrSCP0lWd2s3AnGrc54c88Z6F4AW4oIYePuomR945Xv4GonE5ear4r5laNa1co1EXyQ9AAxZLZQTVPpMlDTPqMoveuiartuW+MmUAAAAAAAAAAAAAAAAAPFVGtVV5JuppEm76smVU3TH+P
8eBtKx/DDwpzcprIfWV7/F2E9ybfrAiOsrXqm53Gg+gZoqeKna57pZHonruTh5YVdm53x+MppKHROrqd8X0jqh8dvhb99jpKiRHcCJuibInx+J08pkjbLE+N6ZY9Fa5PFFA5tQJPSaTud/rHx22mqaRKe300bMughVVxjf1nu4sp4ruq+E7sFmo7FZ4KGhhWKNrcu4vac5eauXqpo7ToGlt9bHNVXKtuEFO7ipKaqkV0cC9FROSqnTlgmCADR3W+MpfVY7Hmm6r7jYXKo9HpHLnCu2z4J1OZXq5PZFLUomXr6sbfs/WBL6bUU8vErI53tTmqR8SJ8jAumoHTMc1HORURcucnCjUObz3jUFZEkM13qWQ8PD3ML+BmPzW4T6jW/Rcbsq9XOVd1Vzv1Adm0rWWG2NfV1t8tkdTI3DY3VcfExvPdM812N3NrzTMWUS6Ryu6JC1z8/FEx9Z89rb4Y6hrWsYmU/JMttLw8pHInlsB2Sr7SaFjV9HhVqY2fUvRiIvuTOfmhE7x2hureKOJ8lTnlHGnBGnv6r8ckJSliRcq3iXzUuoiImERETyArqJ562qWpqXIr8YRrU2ahSAAAAAAAdE7L6hOC6UzueGSp8M5/QdAU5R2dVno+qmQuVOGpifGufH2k/qnV98b8+oHh4VHgFKoWFbLDI6SDhcj/AG43cnefkpkFuSRkaZe9rU81wBR6RFjMscsHirk4mp8UKntVqqi4+BiSTrWcVPSpx8SYfIqeq1OqmYuEw1vstRGpnwRMAWlQx5qaOWTvFWRkioiK6N2M+8ylQtuQDEWigX25qp/l3iIn2BsNJCuYqSPP5T8vX69i+qFtyAUyTSPTDnrw+CbJ8iyqFxUKFQCyqGHGnc1T6f8AEkRZI/J34yfp+ZnOQwq5eDuJU9pkzMeeV4VT5KVFxS25Ni65MKqJ0VULbgLLkLaoXXIW3IBaVChS4ppblcfoe4QyVKr6DVKkavXlFJ0VfJU+WPMDMrKdtXRz0z/ZljcxfcqYOb2i3Utdb67T1WxkNzglc+F67K5cYxnry+S56HT1ITrWwyyK29W9HNqocLJwc1ROTk80+z3BWz0zXOr9P075FVZYsxSZ55btv8MGwehFtAVT6iC4skXLu9SVdsbuRc7fySWPQIv6er/oq/xVDnKkT8Mk/NXZflsvwOsnFZNlRfA6ppuv+kLFTyKuZI07p/vT+7C/EK2wAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4q4RVXkgGpuVS5KpIo28S4x7l/x9h5G1GMa1OSJg8avG58i83uVStALUkzkk7uKPvJOapxIiJ71U9jfOj2pPCjEds1zXo5FXwKaXdsjuqyO+3BlKn3vC9XIqfAAeoeFSARzVE6tgc1F/FRPmu/1HMr1LxSxxdGpxL8Touq8+t72/Ycyui5uEieCIn1AYYB4rsLhEVV8EAtTJh8b/AAXBeLbke9uFYqeeS4AAAAAAAbKh0/d7lhaS3VEjV5P4FRv85djf0vZtept6iSlpk6o6Tid8mov2gQ4HTabsupG4WruU8nikMaM+tc/Ybim0Hp6m9qjfOv5U0zl+pMIByW11rrddaSsb/wBTK16p4oi7p8juL6+nSRyMcsnVO7arue5bgslqpWqkFsoo3dHdwiqnxUqtNQ+S2xtc7D41WJ6IvVP7gPfSKh/4KhnX89OH7QrLg7mlPD+c7iX6jLVVXZVVfeuSkDF9De78NXSOTwibw/WeNoaNjs9xxu/KkcrvqMosVFTBSx95UTRws/KkejU+agXM4bwoiNanJrUwnyKSK3btG03a3d22sWtnzhIqRO8yv53s/WZtTV6kqbbFPbbfRU872OV0NfK5XNXOyeptum/NMcgN4pg1txoaBqurKynp2omcyyI37TkdJqa71us47bqy41NDTNerJIYH9w1HY9VHObvwr45Xmm+NySdpmnrT9AT3GKjY25STxtZKzZZHOVEVF8ds/ICRQ6nprjSVM9npqm4pDhPvbOBr1zujXPwi464I5TawvV7v8tkorXHbp4mq+aSrVZFjbtvwpjfdMb9SbW6hjttspqKJqIyCJsaYTwTmRazxpN2lakqWomIYYIc+atRV/qgSlU235mFcal1LS8UbUdK9yMjRfylM9xrrknrUbl5JUNz8UVE+sDDS0PlTiqa2old1Rr+FvwQrhtVNBK2VGOc9q5ar3q7C+42XNChUKiypbcXXFDkVEyqKnvQCy5C04vOLTgLTjButuhutumop09SVuMpzavRU9yme4tuA5/bNR1Gm6lbLfmvWOLaKoRFX1envb9acvdv59VWSKmdN9IQvTGUaxcuXyxzM282SivdL3FXHunsSN2cxfJf0HPKvs+u0NRw0z4Z4lXZ/FwqieaL+jIVtNCy+l195q2xpGyV7XI1qbJlXLj4Evehg6dsjbFa0p1ej5nu45XpyVfBPJDPk2RVXkEYj03JboCt4ampoXL7bUkb702X6lT5ERdTVMzUkdK2lhX2Vc3L3eaIZunZKe06hpqlrpZXOdwOfIuERF2VUT3KB10AEUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALFY/u6SRUXdUwnxL5hXFGvijjeiq1zsqiLjkBiNREaiJyRCpVRqKqrhE3Uo9HjxmnesLvyXLxNUsvp6iVeGaWNI87tjzv8AEC5Rp/mzXKntKrvmuTKcqK9WovseqpS1EREREwhblpo53cTlkY/GOON2FVPPxAvHuyJuYf0eirvWVePzkPW2ylzl7ZJF8ZJFX7MAaLVPDJEro3I7ZueFc75OfVtmuNXX5paCpmR7UXMcTlTw548jrksbIaqlZDHHGiuVVVrd1wbLLlXKuX5gcXg0PqOowrbY9qeMj2t+1cmbH2b35yeulLGqrvxzfqRTrSoirlURVKkQDlSdmN5VP9Mt3u71/wDZMap7OtQQMV0cUFQidIZUz9eDr5i3C4x26BXOVveq1VTK7NTxUDgU0EtNM6KaN0cjVwrXJhULZvNUXSG53FFgw5saKiyY3eqrv8P7zRgbnTFidqG8to+8WOJrFkleiZVGpjl55VE+J1616btNoY30Oiia9P8ArXpxvX+Uv6MHFLZeKux1iV1HL3cjGrnKZRW9UVPA7Vpm+N1Hp6kuiRd06druOPOeFzVVF+C4z8QNqu/PK+8FR4oFKoeFR4oFKoaqlX0e81tMuzZWpO339f0/I2xqbj94u1uqU2RXrE5fJeX6QNmeHqcsZzjY8UClTX3GyWy7rGtxoKeqWNFRnesR3Dnnj5IbA8A4tr/QKWHF8sbXspmORZYkVVWFc7OavPGfl7uU40JrKPV
Ns7udzWXKnanfMTbjT8tPJevgvwJbLGyaN8cjGvjeitc1yZRUXmiocS1bp6s0DqGC92VzmUTn5j6pG7rG7xaqZx5e7IE713oeHU9J6TTcMVzhbiN67JIn5Lv0L0OeWK/VtZW2XS949VtFcmPR8zsObwIqJGufPZPkdc01qKk1NZ466mVGu9mWLOVjf1Rf0L4HOdeacm1Bqy4PtUTO+oaKOWdrU9aV6qu353Dj34A6wpENIsV941RVdH3JYs/mJ/ea3s912l4jjs9zfi4RtxFK5fw6J4/vkT5m00C1XWSsq3c6u4Tzqvjl2P8A0gSVxiVtOtTSSRNXDlTLV8HJun1mY4tqBh0tQlRA2TGFXZzV/FcnNC44xammnhmdUUaNcr/wkLlwj/NF6KYzq+uflsdsl4//AKj0RqfrKiq61TqajcsS/fnrwR455Xw+BZjopKRnexTzPmRMua9+Wv8AFMFUFBM+pbV10jXytT1I2J6rPcZigWUeyWNksa5Y9Mpnp5fAocUxIsNVJB+JLmRnk5PaT4pv8ytwFpS2pdcW1AtKW1K5HtjYr3qiNTmqmOkMlUzvJnPgp19lrdnyefkgHri2iMWVnHjg4kznlgx5GNoahjouNIJF4XNc7iwvRS/IBZrVe6of3ntIuMeBhOXG6dFM6pXvYGSL7TfUd5+C/b8jBdu1U8QOyW2p9MtdLUquVkia5ffjf6zKI9omo7/TUTesT3MX58X/AKiQkUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWamrpqKFZqqoigiRcK+V6Nbn3qYX3R2P/wCc27+lM/WdRS08xCTaI8y2YLUFTBVRd7TzRzRr+NG5HJ80LpzMaUAAAAAAAAAAAAAAaW6ass1nr20VdVOjqHNRyNSJztlXCbonkbo6tS1YiZjykWiZ1EgAOVAAAAAAAAAAAAAAAAAAAMCuXNQxPBqr81/uM81tUuax6eDU/SBbQ0lHd4pNV3O3S1KMlhZCkUDnY4mq1XK5E6rl2F9yG4kkbFE+R64YxqucvgiHPL7ZdN9o8kNXbb3DFXtZwbJlz280RzFVF2zz+0DpKHqHLLHa9Q6D1Jb6errkrLRcJfR1w5VRkiovDsvJdunNM+R1QD09PD1AMOs2qaVf3y/YZ6cjBq0zVUjfFy/YZyAelR4h6B6mMpnlk5r2pxVjbRUOj4+B0jXPVOrE/RnB0tC1PTxVUDoaiJk0TkwrHplMAfONLL31Mx6rvjC+8vHY3dnGm+8VY6SaFqrngjlXh+GeRm0mjNP0bkdHbInuTrM50n1KuAOR2nS9z1FxRUkPDEqKjqiTKMb8evuQ7TYrPDYrJSWync50dOzh4nJhXKq5VV96ryNi1jWNRrWo1rUwjUTCJ7kPQKQVKUPe2NjnvXDWoqqvggHhSsjM4V7c+GTSxMnviunllkipMqkcTFwrk8XKZP0BbeDHo2/j3jgNiam/OatPBEip3zpmqxqcz1bN3W1NV1cLV5ta/KfDkXaW1U9LJ3qI+Sb/AHkq8Tv7gM1VTK48VPFPcYPAKVPCpSlQPFMS42+mulvmoqyJJaeZvC9q/wCNl65MxeRQoHBZUuvZdq/7250tHLumdm1EWeS+Dk+pfJd+laHljubr1fo0dwXCtVIlcmFWONqNb+k22p9OUmprRJQ1KI1/tQyom8b+ip5eKdRpizLYNOUVse5j3wtXjczkrlVVXHxUDn3aHomSlmfqSyI6N7Hd7URxrhWqm/eNxy8V+fiTDQ9P6Nom0s/Kh7z+cqu/SSV6I5qo5EVF2VF6lpGNjjaxjUaxqIjWtTCInggFDi2pcUtqBbUtuLiltxUWnci2pccW1AxKxeCHv0ReKFySJjwTn9WULkiIjlxy5p7ip7Uc1WruiphSxAquoaZzlyvdo1V802UDxxYnmZAxXvXCdE6qvgh7JOqy9xAxZZ1/Eb081XoVMpm0z+9mck1V0X8WP3J4gWI6dXK2orG784qdenm7z8j2V7pHK5y5VSt6q5yqq5VepbUDCuDOOikTwTi+R41/eQMf+U1FMiVqPjc1eqYMGGOt7tsDaVyK1Md49cMx456gVL/o835zP0mDK5WMVUTKoZ8/DFCkDH94qLxPf+U7y8jXy7sUCednM7n0VbCqY4Xtfj35T/0k1Ofdn0mLnWR/lQo75L/edBIoAAAAAAAAAAAAAAAAAAABj1dfR0DGvrKuCma5cNWaRGIq+WVLETM6gmdMgGs+6Ox//Ord/SmfrMqkuFFcGudRVlPUoxcOWGVr+H34Us0tEbmEi0T4lkgA5UAAAAAAAAAAAGBebvTWK1y3Cr41ijx6rEy5yquERCzp+/0mo7b6bRpI1qPWNzJERHNciIuNveh36duzv1w57o32+7agA4dAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAh3ab+02X+Gj+0h2kdB0mo7G6umrJoZO9dGjWNRU2RPH3kx7Tf2my/w0f2lvsu/aiv8Zf8AY0+riy3xdF3UnU9zx3pW+fVvggNbS3bs91IxYahXNXD2PTZs7M7o5P0dOadFOuVmpLdQWCK81Mitp5Y2vjaiZc9XJlGoniQDtbqYn19tpmqiyxRve/HNEcqY/qqazWizw6f0vSycSNbRceF8VRv2Ib2xR1VcVr8TO9/SGcX9GbxXxCQJ2u03pHCtol7nPt9+nFj83GPrJezU9DUaZmvlIrp4Io3Ocz2XIqc2r4Kc9jn1NPpVlqi0rA6hfAiNekTsrlNn+17XXPiVWG1Xa06Q1PFcKSanikpkdGknJVw7OPqM8vS4NbrxMTHG97jbumXJvnnj4J1pXVUWqYamSKlfB3DmtVHOR2cov6jDqNdQU+rUsC0UiyLMyLvkemMuRN8fE0fZEqehXROveR/Y40lxVF7YmYX/AOOh+xpxHS4vxGSmuIjcf0X1r+nW2+Zlj6/1It1vfo8LJYPQXyQOXj9tUdjO3uJ5pDWNNd7ZOkkLqZlugYsssj8oqYXK/wDlUi3a0xrblbla1EVYnquE57oTp1qbc9FpQx8MT6iiYxHImN+FMZx0ydZ5xT02OJrrf9OefqmOLxltyjNX2s0Uc7mUlsmqImrvI6RGZTxRML9eCS6a1bb9TxyJTI+KoiTL4ZOaJ4ovVDm9uk1ToRali2hJKeTCyufEr2KideNvL4/IkOjr7Y6ySsdQ2eO33RlM9yd2vE2RqbqieG+NsDqOlxRjmcdePjE7+8f9GPNebRFp/TTY3/tJt1mrZKOnp31s8S8MitejGNXqmcLlU9xVYO0e23qtjopoJKOolXhj43I5jndG523XpsQ3swo6et1NPLVMbK+GBZGI9M+srkTi9+/1k8u+i7HcbqyvlkkpKlML94e1nEqLs7Cou/n5HObF0uG3o2id68/P6LjvmvHfE8fBBe0pUbraByrhEgjVV/lON/UdrNDFXOiht001M1yok3eI1XeaNx9qkf7TWd
5rOFirhHU8aZ/lOJZ2gWe302iJFgo4YlpXR90rGIity5Grv7lNpjDamGuSN74/s43eLXms60llrudNeLbDX0j1dDKmUymFReSovmi7GYQnsse52knoq5RtU9G+ScLV/SpNj5XUY4x5bUj2l7Mdu6kWkABi7AAAAAAAAAAAAAAAADVTI70uZzl5uTHkmE/vNqaudc1EnkoFCtRzVa5EVqphUVNlQ5lqrS9jqtOw3Gnt0VHXTVccDXU+WJvLwr6vLlleR045xe7hFTaWs888ipBTahxULhV4WsmlVdk9yAWa7s2vtNUUtVadRSVL6R6SQQ1yqqMVPDmn1ITPSs2o5KKZupKeCKoZJiJ0KovG3HNcKqc/d7iL6Z7Qqi/a3mo0i4bTKjoqV3BheNqK7Kr4uajlx5J556NnG4FqasigejHKrnrya1MqVRVcUruHKsf+S9MKWLcnFG6oX25XKqr1RPAzJGMnZwTMbI1OXEm6fEDEaqVNxRzVyyFuMpy4lM9C3HGyJiNjajWp0QuIBUinpSVIBUeoeEA7Qe0P7msWy2I2W6yNyrlTLYEXkqp1cvRPivmE5rK+jt0CzVtVDTRJ+PNIjE+akdk7SdIRzd069xK7OMtikc3+cjcfWanTXZ+2dkd31a99zusqcfdVLlcyFF/F4eSr49E6JtkllVpiw1tMtPUWehfHjGO4aip7lRMp8AK7bqCz3ja3XOlqXImVZHIiuRPNvM2JwTX2gpdHzxXezzTJQrIiIqOXjp39PWTfHgvwXzmvZnr6XULHWm6PR1xhZxRy8u+YnPP75PrT3KB0VTW356sstSqLuqInzVENkvM118jWWzVLU5o1HfJUX9AF+jiSCljiTkxqNT5IXlLFFKk1JFJn22Nd9SF9QPCleZ7xJnx9yZPFXfkvyA8U8Pc55HgHhSpUpSB4qlJ71PAPFKSpTEq6pYVZHEzvKiTZjE+1fIC5NLHCxXSPaxviq4MBbgs6q2jppaj98iYb81L7LexH97WO9JqPBfYZ5InUvvc5Uxn1fBNkA1zobnJnjlp6dPBPWchbdRVSJxNunE7wdDhDYKUYVXYTmoGDTVL5XyQzNRs8S4cicl8FQvOMSlc2e4VlQzePLY2r48KYz9hlOKi04tqWpq+Fj+7ZmWVeTI04lLa01dUJmeRtHEv4qes9f1AU1NZDT7Pdly8mN3VSzDBNPTsjkkdSxMV3EmPXXK5RE8NsGXDBT0f+jxev1lk9Zy/qLLXKlZPG5VXvWpK1V8W7L9XCBU3u6eLuqWPu2dV/Gd71LKlalDgLbi24uOLagWnFiV6NjcrnYYm6qq7J5l9xpL9PKlPFRU7kbPWP7lrlTPC3Cq52OuERfmBh6ifXR21au3VLY1gRZXpwoqSMRMqZKuSSDiTk5uUNNBSz0FRVWV9VJUwS0iyROl3Vv4qt926Gyt0vfWqkk/LhYv1IBLdAzf8A4ic1OTqZ32ov6DppyzQjUi1MxE/Gjf8ADZDqZJUAAAAAAAAAAAAAAAAAAA532t/6qt38O7+qdEOd9rf+qrd/Du/qns6D/M1/+9mHU/ypa7TvZxQXmwUlwlrqmOSdquVrEbhN1Tw8iVW6y0mgbLcqyGSeqbwpI5r8Ivq52THvITY9M6vrrLTVNuvToKSRqrHF6ZKzhTK9ETCbkomt12tfZxeILxWLV1Kte5JFldJhuG4TLt+aL8z2dRNrW7LZNxM+P1YYoiI7orqdeWP/AJWLd6E6VaCfv+LhbDxpumOar0Q20mvLfTaZo7xVRSRuq+LuqZi8TlVrlRd9kx5+ZFeyyz0Na2vrKqminkjcxkfeNRyM5qqoi9eW/kbjXNx05aH0sFZZ2VtUjFdFEju7axiqu+U8Vz08TnJhwev6NKTMx8/l4dVyZPT77TDFi7XKR0+JrTMyLPtMlRzvlhPtJZcdT0dFphb9Ai1VKqNVqMXhV2XI3rywv2HM9VXe7XKxQsqtNtt9FHI3upe6c1W7LhEzjZU8jOjVV7FJcryn2/8AFQ7ydJi1S0V1u0RMb25rmvu0b3xvxpt5e1igbQMljt8r6hz1TuVkREa1Mbq7HXPLHQ3OltcUWpppKZsD6aqY3j7tzkcjm+S7fLBpey62UU2nqqqmpopZpKh0auexHeqjW7b9N1I7pOFlH2qupoU4Yo6ipja1PyUR+E+pDm/T9PMZKUrMTXne1rkyx22meJTrUuvbdp2pWj7p9VVoiK6Nio1GZ5cTvHywprbT2p22tqo4K2kkouNcJIsiPY33rhFT5ES07BFde05yV7Uk4qmaRWP3RXJxKifDGfgdG1Do+yXyWGat4qeRiK1HwuaxXp4LlFzj9JzkxdNh7ceSJmZje/8Axa3y5N2rPv4ZeotTUGmqRk1Yr3PlVUiijTLn45+SImU3IjF2uUjp8TWmZkWfaZKjnfLCfaZGtbjp60R0FNXW5bpVsgRIu8k4cR8uJzk6qqdE6dCL6qu92uVihZVabbb6KORvdS905qt2XCJnGyp5F6Xpcdq17qb37719o90zZrRM6nx8nQ7/AHi1T6LluUsCXC3Stb97ReHiy5E580VF+KKhj6SvFmZpOetpaT6NoKeRyPa56vXKIiqueaquUQicSqvYnMirym2/8VDJ0haX3zsxuVuiejJJal3AruWUSNyZ8soSenpXFaJmdRfX6fTwsZLTeJiPZlT9rVI2ZUp7VPLCi7yPlRi48cYX7SV6c1PQampXy0nGySJUSWGRPWZnOPJUXCnMrfW6m0RS1NJUWVslFI7il76FXMXKYX12rjGE5Lkl2gbvYbjNUNt9qbbq5saLIxruJHszzRffjp1L1XTY645tjrxHvE7+5iy3m0Raf00nIAPkvYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAjWurVW3nTT6Sgh76dZWORnEjdkXfdVRCCW+w9oVqpFpaGKSCBXK7hbPDzXrniz0OwA9eHrLYqen2xMeeYY3wRe3duYlyuy9m9zrbmldqOZODi43xrJ3kkqp0c7kifFV93MlutNKJqW2RMgeyOrplVYVd7KovNq+HJPkScEv1uW2SMm+Y8FcFIrNfi5NBSdo1NbvoiGKRsDW8DXo+PLW+CPzlPtQl1h0tWU2na6iu1wlqamujVj1WR0jYkVFREbnrvlV/USsFy9Za8aiIj34gpgis73MuO2/TOuNOV0zLVGrUl9VZGPjcx6JyXDuXxTO5ft+iNR0usKSuqYfSI2VLJpqnvWbrlFcuFXK4XPTfB1sGs/tHJO/wAsbmNTw4jpax7zwhPaDpOt1CykqLfwOnp0c10TncPGi4xhV2zt18S3py1apmsdfa7zNLSMSFkdFKx7OKNUz1YuV5N5ruhOgYx1d4xRi1Go8fGGk4a9/e5bTUvaNY3ywQo6tje7aSSVsqZ8U4lynx2NjobRNdabjJdrsrGTq1zWQNVHYzzVVTbx2TxOgg6v1t7VmsREb86jy5r09YmJmZnTk9donUOnr664abVZIsqsfA5vExq/iua7ZU+ZdoNG6iv+oIrlqZUZFGrVc
j1aqvai5RiNbsifrOpg7/eGXXiN61vXKfhqb99fD2c11vpW9XfVUNbQ0fe07Yo2q/vWNwqOVV2VUXqSrWttq7vpWqoqGLvah7mK1nEjc4eiruqonJCQAxnqrz2cR+Tx/wCu/Rr+b5or2f2evsmnpKW4wdzM6oc9G8bXeqrWpnLVVOikqAMsuScl5vPmXdKxWsVgABm6AAAAAAAAAAAAAAAADUSN4aqfdVy/O/uNuaqo2rJU9ygeIc5hp7fX3/Uejbu9zGVVUldSua7hcquRHORqrtlF/wDUdDc9sbFc9yNanNVUhGutFN1akNfa6iJlxgbw+s7CSNzlEVU5Ki5wvmBTW0drsN40rp61MRsiVq1L25y/hRjkVzl88/8Al8ifSfgn+5TnWgNA1tkukl4vUjH1nCrImNfxq3OyuVfHG3uVTo/MCxQf6FD+aZSGBSSNp3LSSKjXNX1FcuOJql99SrnrHSsbK5PaersMb5bc1AykPUMalqFm42vbwSMXDm5yZAFRUhSeoBTLK2CF8r/ZY1XL7kPnTS0rtS9p1FU13ruqKxZ3ou6erl6J7tkT3H0XNEk8EkTvZe1Wr7lTB8y6fmfprXdE6r+9rSVndz5/FTPC76lUD6hBSinuQNdqC1sven6+2vRF9IhcxuejseqvwXC/A+b9G1b7frS0TIqtVKtjHeOHLwu+pVPp+SVkUbpHqjWNRXOVeiIfNuirc+/a/okjYvdsqPSpP3rGu4t/euE+IH0mUvaj2Oa5MtcmFTxKjxQNJb5lttQ621C4RFVYHrye1envNlU1MVPC6WV3CxvNV2z5J5ntVSQVsXd1EaPb08U9ymHDZKKGRJOF8jm+z3jsonwAsRUdRc2JPVzTQsduyCJeFGt6Z8ytbKxiZp6qpif0ckmU+KG1KQNXBWT09Q2lr+FHu/Bzps2TyXwU2OSzW0rK2mfC/r7K+C9FMa11L56VWzfhonLFJ5qnJf8AHgBnKeBQBSeHp4oHimHSYWtrZF/DoqMROrWY5p7zLUxKmkSaRs0cjoZ2ezI37F8UAvqW3FuGqkfOlLVsRk7kXgkZ7MmPsUrUChTFrFm9EmSnarpVbhuOe64XHnjJlKW1A10EVVFTsigpe6an49Q5G5X3JuHULZN6upkm/wDpxpwM+PVTMUtuKilisgZwU8TIW/vE3X3qWXKqrld1UuOLahVtepiVarGjKhqKroXceE6pycnyyZalp24RTIjc5aqK1Uy1U6opaU8pGqkMlOu3o65YqrzjXl8uXwKUkZI5Gxua9y7IjVzlQLMsknF3cEaySeGcInvUq9Hq440dURIzPJWrlFMmeNlMxIWORz19aVydXL0MOWRY43ORfZ3VPHAFpxG9STeg1Nsub0VYKaZzZcJnDXt4c/Ak1Q1Y5XNXoYFY6BtNItSsaQ49fvMcOPPIEfpquG5Xea5QuzR08HcpK5MI5yrxOVM9EREKrG5rrJScK5ajMIvki4LVNOy+SKlOxGWmB3CjUTHfOTfl0anh1Llkej7RCqdFenycqATPQycWpmL4RPX7DqBzbQEDn3yWbbhjgVF33yqpjb4KdJEqAAgAAAAAAAAAAAAAAAAEM7RLFcr7b6KK203fvjlVz042twmP3yoTMGmHLOK8Xr5hzekXrNZabSlDU2zS9BR1cfd1ETFR7OJFwvEq802LmpaOe4abuFJSx95PLCrWNyiZX3rsbUD1J9T1Pfeztjt7UK7OrDcrFRV0dypu4fLI1zE7xrsoifvVUwu0DR1xvNfBc7W1ssrY0jfFxo1dlVUciquOv1IdCBtHV5IzTmjz/RnOGs09P2cnuli17qK2o24ta5sLkVlPxRtV7uXEuFRNkzzX3IbWPTN4b2Xy2daT/P3S8SQ94zl3iLzzjl5nQwdz115iIisRETviEjp67mdzzGkV0BZ6+yaekpbjB3My1Dno3ja71Va1M5aqp0U0Fm0reqTtIlu09FwUK1NQ9Je9YvquR/CuEXO+U6HSQcfi7917aj83lfRrqsfByzWGkbja7vPqSzytbG1y1D8PRronc3KmdlRd9vPGDVW+26g7RKqKorqxi0sCqx0q8KcHJVwxMLldt8Y89jqeooq6ayzRW+kpquZ+GrDU+w5vXO6faRXRGjrnZ71UXSvSCnbJG5jaeF+UTKovnsmNt1Pbi6v/AAJtaY7o4ifdhfD/AImo3qfPwWdc6HrbhPSVlmja7uIGwLBxI1URueFUVVxyXHwQ1l0sWvdRW1G3FrXNhcisp+KNqvdy4lwqJsmea+5DrAPLTr8lKxGonXiZjlrbp6zMzueXPY9M3dvZdLZlpP8ApB0vEkPeM5d4i8845J4l/TmlrrBoestVRJJbq6SoWSKSOVFVuzcbsXkuFQnYOZ6zJMTHHM7/AFdRgrExPy05ZTU/aPao5aKON1UyRV4ZZJGS46ZRXLlE8l+RuNA6Lq7BNNcLi5ramWPumwsXi4G5RVVV5Z2TkTsFydbe9ZrERG/Oo8pXBWJidzOgAHjbgAAAAAAAAAAAAAAAAAAAAAAAAAA//9k=", - }, - ], - tool_failed: false, - }, + tool_call_id: "b", + content: [ + { + m_type: "image/jpeg", + m_content: + 
"/9j/4AAQSkZJRgABAQAAAQABAAD/4gHYSUNDX1BST0ZJTEUAAQEAAAHIAAAAAAQwAABtbnRyUkdCIFhZWiAH4AABAAEAAAAAAABhY3NwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAA9tYAAQAAAADTLQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlkZXNjAAAA8AAAACRyWFlaAAABFAAAABRnWFlaAAABKAAAABRiWFlaAAABPAAAABR3dHB0AAABUAAAABRyVFJDAAABZAAAAChnVFJDAAABZAAAAChiVFJDAAABZAAAAChjcHJ0AAABjAAAADxtbHVjAAAAAAAAAAEAAAAMZW5VUwAAAAgAAAAcAHMAUgBHAEJYWVogAAAAAAAAb6IAADj1AAADkFhZWiAAAAAAAABimQAAt4UAABjaWFlaIAAAAAAAACSgAAAPhAAAts9YWVogAAAAAAAA9tYAAQAAAADTLXBhcmEAAAAAAAQAAAACZmYAAPKnAAANWQAAE9AAAApbAAAAAAAAAABtbHVjAAAAAAAAAAEAAAAMZW5VUwAAACAAAAAcAEcAbwBvAGcAbABlACAASQBuAGMALgAgADIAMAAxADb/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAJABQADASIAAhEBAxEB/8QAHAABAAEFAQEAAAAAAAAAAAAAAAYCAwQFBwEI/8QAXRAAAQMDAgMEBgMIDQgIBAcBAAECAwQFEQYhEjFBBxNRYRQiMnGBkRWhsRYjM0JScsHRCDZWYnN0gpKUstLh8BckNDU3U5WzJUNUdZOiwvFEVWOkJjhXZaO00+L/xAAZAQEBAQEBAQAAAAAAAAAAAAAAAQMCBAX/xAA0EQEAAgIBAwIEAwYHAQEAAAAAAQIDESEEEjETQVFhcZEUIoEFFTIzobEjNEJSwdHh8PH/2gAMAwEAAhEDEQA/AO/gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAct152U/StPdLvZ7zeILs9HTsgSqVYXuxngRuMpnGEwu2TqQA+K9IVM961fa7Xdr9c6aiqp0hkkiqXI5qrs3CrlEy7CZVNsn11pfTFJpS1voKOpraiN8qzK+sm71+VRExnw9VNvefLfbDpd2lO0OqfTtWOkrl9Mplbtwq5fWRPDDs7dEVD6V7PNVN1jom33VXItSre6qmp+LM3Z3uzs5PJyAce7Y9C1ulaJmoLLeLs6kknVtVFJUud3SuXLXNVMYbnbfqqeJT2EW6l1JXVVZcbxdX3K2zRzRQelqkbmb7qi7u3Tffw8Tvt6tFLfrJWWqtZxU1XE6J6dUynNPNF3TzQ+SNO3Gu7Ku1JG1nEiUc601Y1qbSQu5qidUxh6e5APpbX+lLXf7Ytfc7rcrdHboJZO9o6ju0RuEVVcmN8cP2nC+yXRt117VVVXcr1dILRSKjHOhqXI+WRUzwoq5RERMKu3VPHKT3ty1RLU262aPsju/rL05j3JEueKJXeoifnO+pq+J0nRmmKfR+lKGy0+HLCzM0iJ+EkXdzvivLywnQC9U6bpKnSiadfUVjaVIGQJMyZUmw3GF4/HbdT5v7TtNVWltaW6x6fvl2qpK+Jitp5alznte56tamUxsqp4H1LNLHTwyTSvayKNqve5y4RqImVVTgnZlDJ2g9rl51xVsctHRuVtI1ycnKnDGn8liKq+aooEx0d2RJYKmhudz1Hdq64QKkjom1CpT8WOWFyrkT3pnw6HTTWVGpLFSVD6epvVuhmYuHxy1TGuavmirlC191mm/wB0Fq/psf6wNwCw2spX0XpramF1Lw
d536SIrODGeLi5Yx1Nd91mm/3QWr+mx/rA3ANbS6isldUspqS82+onfnhiiqmPc7CZXCIuV2QuV15tdskbHcLlR0j3plraidsauTxTKoBnA0/3Wab/AHQWr+mx/rNjJXUkVF6bJVQMpOBH9+6REZwryXi5Y35gXwaf7rNN/ugtX9Nj/WXqXUVkrallPSXm31E788MUVUx7nbZ2RFyuwGyBh192ttr7v6QuFJSd5ng9ImbHxYxnGVTOMp8zD+6zTf7oLV/TY/1gbgGqi1NYJ38EN8tsjvBlXGq/UptEVHNRzVRUVMoqdQPQYtdcqG2RNlr62npI3O4WvnlbGir4Iqrz2MH7rNN/ugtX9Nj/AFgbgGn+6zTf7oLV/TY/1lcOp7BUTRww3y2SSyORjGMq41c5yrhEREXdVA2oKJZY4IXzTSNjijarnveuEaibqqqvJDVfdZpv90Fq/psf6wNwDT/dZpv90Fq/psf6zKpL1aq+RI6O50VS9eTYZ2vX6lAzgAABq5tTWGmnfBPe7bFNG5Wvjkq42uaqc0VFXZS391mm/wB0Fq/psf6wNwCiKWOeFk0MjZIpGo5j2LlrkXdFRU5oVgDiPbz2gXCwyW6xWSvlpKt6ek1MsD+F7WZwxuU5ZVHKvuTop2qoqIqSmlqZ5EjhiYskj3cmtRMqq/A+c4NMTdpemdb62qIXLVVEi/RbXJuyOHDlRPe1EZ70UDtegNSpq3RFsu7nIs8kXBUIm2JW+q7bplUynkqElPnn9jhqXu6u56amf6sqemU6Kv4yYa9PeqcK/wAlT6GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5l246S+6PQsldTx8VdalWojwm7o8ffG/JEd/JOZfse9WJbNTVOnqmTFPcm8cOV2SZicv5Tc/FrUPplzWvarXNRzVTCoqZRUPjXXlgqezztHmioldCyGZtZQSJ0Yq8Tcfmqit/kgfZZwP9kTpBHQ0mrKWP1mKlLWYTmn4j1+OW582nZNKagp9U6Xt96psIyqiRzmovsPTZzfg5FT4HMu3fUM01LbtD2tO9uF3lYsrE58HGnA3y4non8xfECJdgFsjv2rKu9XKq9IqbVTRQ0sUi5VqK1WI5PJrW8KfnH0kfHOjrxWdmPac1K5FY2nndR17UzhY1XDlTxRMI5PHCH2Kx7ZGNexyOY5Mtci5RU8QOY9umqFsWhH26neqVl3d6MxG8+75yL8sN/lkh7M9Kpo/QtBbZGI2re3v6vx71+6ovuTDf5JzVzf8pf7IFf8ArLNptEz1a57HfLeT5tYd4A5l2raE01WaRvt8faoW3WKndO2qjVWvV7eq4XC8sbnKOwjSFj1Td7s69ULaxtJFGsUb3KjUVyrlVRF35dTvXaX/ALM9R/xGT7DkH7Gj/Weov4GD+s8Dv0droIrT9FMpIW2/ulg9GRqcHdqmFbjwxsfOfb1oywaYSy1Vlt7KJ1U6ZszY3Lwu4eBUXCrhOa8j6XODfsmP9C03/CVH2RgSHsU0ZYKfRln1Glvjfd5myPWqequc313M9VOSeqmNk8TD7d2Wq5UluslPb21uqq2RrKJI/wAJFHxZcq/vVwqb7c1/FNRpntcsWi+yG00kciVt6ZFI1tGzOGOWRyosjuiYVF23X60mfZbphVpE1teallwv95jSZZ85bBE5MpGzw2wi+GMdNwxtG9iGmrJaoHXqiiul0c1FmfMqujY7q1jeWE8VTK+WcHRJ7Tb6m0LaZqOF9vWNIvRnMTg4ExhuPBMJ8jNAHyr26aTsultQ21tlom0cVTTOfJGxyq3iR2MplVxt4bbHZey/Qmm7ZpawXuntcX0pNRRzuqnqrno97MuxldvaVNuhzT9kp+2Gx/xR/wDXO29n/wDs601/3ZT/APLaBnXvTNk1GyJt5tdNXJCjkj79iOVnFjOF6ZwnLwQ+TrNp22z9tLdPzQK+2su8tP3SuXeNj3IjVXnyREPsY+TbD/8AmOX/AL+qP+Y8D6AreynQ9bQvpHado4mubhJIGcEjfNHJvn3nC9IajuvZl2pSaZkrZZ7R6d6JLC9ct4XOw2Vqfiu3RVxz3TwPqVzmsarnKjWomVVVwiIfKlPRL2h9vs81uastD9IJPJM1PV7iJURXZ/fcKInm5APpq96ftOo6NtJeKCGtga7jayVueF2FTKeC4VT5J1xp222rtbq7FQwrDb0q4Y2xo9VVrXtYqoirlfxlPsg+SO0+eOl7dLhUTO4Yoqume92FXDUjjVV2A78nY7oBGon3OQ7JjeaX+0c37ROzC1aHqrTq6wMlgo6OvgdV07pFe2NvGio9qruiZTCoqrzQndZ25aCgop5ae8uqJmMV0cLaSZqyOxs3LmIiZ81N12dXKu1F2d2m43mVtVV1LXySPWNrUX747h9VERNkRvTp4gSiop4aumlpqiJssEzFjkjemWvaqYVFTwVD5r7e9HWDTC2SostujonVSztmbEq8LuHgxsq4T2l5eJ9MHBP2TH+jaa/PqfsjA3vZN2e6Urezq2XGuslJWVlW18kstSzvFVUe5ERM7ImETkbbUHYjo68U0i0NEtprecdRSOVEa7plirw492F80Nh2Pf7KLB/BP/5jycAfOWnO0bUfZnqx2lNZTPrLfE9Gd+9Ve+Fi+zIx3NzMY2XdOmFTC/RccjJY2yRuR7HojmuauUVF5Kh88fslbbHHc7DdGtTvJ4ZYHr4oxWub/XcdL7GLvLd+y61OncrpabjpVcq5yjHKjfk3hT4AaTth0JppdFXu/stUMV1jRsyVMSq1znK9qKrkRcLnK806nPuwXRth1RJe6m90DK1aTuWwskcvC3i48qqIu/spzOydr3+ym/8A8C3/AJjTnP7Gb8Bqb86m+yUDu9NTw0lLFTU8TYoIWJHHGxMNY1EwiIngiF0ADl/bjqGa36RhsNBl1xvkyUsbG+0seU4se/LW/wApScaWsMOmdLW2yw4VtJAjHKnJz+bnfFyqvxPn7UGs5br24LeaazVd7oLG5YYKelRV3bxJx5Rq/wDWKqovXCeBOf8ALXeP/wBOL58n/wD+YHI7oyTst7aXSwtVtNR1iTRtantU0m6tT+Q5W+9D65iljnhZNE9HxyNRzHNXZyLuiofJ3a1fqvWFXRXiXSdys7qeJYJZqljuF6ZyxMq1MKiq7358jtnYhqX6f7OqanlfxVVsd6JJld+BEzGvu4VRP5KgdIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAORdv2kvpnSMd8p481dqcrn4Td0DsI75Lh3knEddLVVTQ1tJNS1EaSQTRujk
Y7k5qphUX4KB87dgeu6a0Q3Wx3WpbFSMifXwPeuzeFv3xv81Edj967xNt2T0VRrvtDvHaFc417mKRYqJjt0a5UwiJ+YzCe92TjuoNGXCz69qNLQxPmqfSUiptt5WuX1F+KKmfDfwPsDSenafSml6Cy02FbTRIj3omO8eu7nfFVVQOGfsidJei3Sj1TTR4jq0SnqlROUjU9Ry+9qY/kJ4mz0f2rJRdiVwdPOn0vaGJR06OXd/HtC7z4d8+Ufmdc1npuHVukbjZZeFHVES909fxJE3Y74ORM+WT5N0Jo2o1H2h0tgq4HsbDM5a5qphY2Rr66L4Kqpw+9UA+h+xLSztPaDirKlipX3ZyVcqu9pGL+DRfh63vcp0k8a1rGIxjUa1qYRETCIh6BFe0v/AGZ6j/iMn2HIP2NH+s9RfwMH9Z5P+1jW2naHRl8s77rTPuc0DoG0kT0fIjnflIns4TffByjsE1VZdN3m7x3mviom1cMaRSTLhiq1VyiryT2uvgB9QnBv2TH+hab/AISo+yM7gy4UUlu+kWVkDqHu++9JSRO74MZ4uLljG+T5y7ftYWLUclmorPcIq11Ksr5pIV4mN4uFERHclX1V5Abiw9lNp1n2LWqqpKeKlv3dSPjqm7d65JHojZPFFRETPNNvcuj7JO0Op0Re5NJalV8FA6ZY077ZaObOFz4MVefRF38TonYlq6xVWh7TYG3CFl2p0lY6lkdwvd67n5ai+0nCudvBfA1/bp2dQ3e0zart7Wx3Cij4qpqbJPEnX85qfNNuiAdmRcplOQPn3sj7ZKShtjbBqusWJtOiNo62RFcnB/u3qmcY6LyxtthM91+mLZ9EJdluFMluWNJPSllRIuHx4uWAPnz9kp+2Gx/xR/8AXO29n/8As601/wB2U/8Ay2nz1276ps+ptS25LPWsrI6SmVkksW7OJXZwi9dvDbc7P2X6007c9IWG0091pvpKGijgdSPejZVcxmHYau6+yq7Z2A6CfINNbo7v281VBLNPDHPe6hiyU8nBI374/druin1RfdT2TTNO2e9XOmomPRVYkr/WfjGeFvN2MpyReaHyZZ9SW2n7ZW6jmkcy2uu0lSsisVVbG97lRVRN+S5xzA+g6nsjjr4VpbjrPVlXRLstPLXorXt8Her6xLdOaUsmkqFaOyW+OljdhXuTLnyL4ucu6/Hl0L1l1DZ9RUzqiz3KmrYm4R6wyI5WKvJHJzRfebMAfJfaS1r+3ura5Ec1a6lRUVMoqcEZ9S3e+WqwUiVd2uFNRQK7ha+eRGI52M4TPNcIuyeB8ha11LQXbtWrNQUSvlofS4pGOxhXtjRqZRF8eHKZ8QPrSu0pYbjQzUlRZ6F0UrFY7/N2ZTKYyi42XzMXQun6nS2i7dZKyeKeeka9qyRZ4XIr3OTnvyVDTs7Y9APja/7oom8SZw6GVFT3+qQrtN7ZbLVaZnsulqx9bXV7e5dNHG5jYmO2du5Ey5U2THiu+2FDt5wT9kx/o2mvz6n7IzttDFHZ7FTQ1E7WxUdM1sk0j8IiMaiK5VXptlVU+de37WFi1JPZaOzXCKtdR986aSFcsTi4OFEdyX2V5Adg7Hv9lFg/gn/8x5ODkHZJ2h6UpOz+12muvVLRV1K17JI6p/dpu9yoqOXZUVFTqSq7dreh7RA6SS/01S5E2jo175zl8E4cp81QDnH7Jepj7jTlLlFkV08ip1RMMT9fyJn2FW+Sh7LaF8rVatVNLOiL+SruFPmjc/E5qun9QduGuG3qqo57ZpyJGxxySphVhRVXDM+09yqqqqbJnrhEX6MoqOnt1DT0VJE2Kmp42xRRt5Na1MInyQCIdr3+ym//AMC3/mNOc/sZvwGpvzqb7JSTdsmttOxaGvFjZdaea6TcMKUsL0e9rkeirxY9nCIvP3HPuwDVtj05NfKW83CGhdV9w6F87uFjuHjRycXJF9ZOYH0uQ/tO1T9yOg7hcI38NXI30el3371+yKnuTLv5JvLhqSyWm3wXCvu1FT0dQiLDNJM1GyoqZThXPrbb7dD5s7WO0O2601ZbqKCWR+naCVO8kaiosyqqcb0TnhGphPivUDr3Yhpj7n+z6nqpmcNXdHelyKqb8CpiNP5vrfylOkmjsGqtN35jYLHdqGqWONHJBDInGxiYTPBzREyicvA3gGi1np9uqdHXSzOROOpgVIlXkkiesxfg5EPnXsF1E+xa/ks9Sqxw3Niwua7bhmZlWZ8/ab73H0td7/aLBCya73OkoY3qqMWolRnGqc0TPP4Hx/ra6W+HtPuN30zU8dO2sbVU8zUVE7zZ7lTPTj4sAfaIIRo/tT0zq6npY4rhFTXOVqI6imXgej8btbnZ/lgm4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGFJZ7bLd4rtJQU7rjFGsUdU6NFkaxeaI7mnNfmvipmgADCprPbaO41VxpqGniravHpE7I0R8uOXEvUzQAAAGlq9IaZr6qSqrNO2ipqJVzJNNRRve9fFVVuVLP3CaP8A3KWP/h0P9kkAAxmW+ijt30cyjp20PdrF6MkTUj4FTHDw4xw42xyNR9wmj/3KWP8A4dD/AGSQADT0WktN22rjq6DT1ppamPPBNBRRse3KYXDkTKbKqfE2k8EVTBJBPEyWGRqtfHI1HNci80VF5oXABp/uT03+5+1f0KP9Rmvtduktq219BSuoFbwrSuhasSt544MYx8DLAEf+4TR/7lLH/wAOh/smRRaS01bauOrodPWmlqY88E0FFGx7cphcORuU2VUNwAMKvs9sujo3XC3UlYsWUjWogbJwZxnGUXGcJ8kMT7k9N/uftX9Cj/UbgAYdDabda0kS32+lpEkxx+jwtj4scs4RM81MwADCuVotl5gbBdLdSV0LHcbY6qBsrWuxjKI5F3wq7+ZgRaL0rBnutM2aPPPgoIkz/wCU3gA0/wByem/3P2r+hR/qPW6U041yObYLUjkXKKlHHlF+RtwBRNDFUwSQTxMlhlarJI5Go5r2qmFRUXZUVOhovuE0f+5Sx/8ADof7JIABH/uE0f8AuUsf/Dof7JkUmk9OUEneUen7VTP/ACoaONi/NENwAAAA0lTo3S9ZUyVNVpuzzzyuV8kstDE5z3LzVVVuVUtfcJo/9ylj/wCHQ/2SQADW1mn7LcKOno62z2+ppaZESCGamY9kSImERrVTDdttjB+4TR/7lLH/AMOh/skgAGrtumrDZ6l1Ra7JbaGdzVYstLSMicrVVFxlqIuMom3kbQADAudjtN6bG262uir2xKqxpVU7JUYq88cSLjkhrvuE0f8AuUsf/Dof7JIABpKbR2l6KpjqaXTdngqInI6OWKhia5ipyVFRuUU3YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKFmjauHSMRU6K5AKwW+/h/3rP5yGm1BrLT+loopbzcW0zJcox3dvfnGM+yi45oBvQYNmvFBf7TT3S2T9/RVCKsUvA5vEiKqLs5EVN0XmhnAAAABS57Ge05G+9cFPfw/71n85ALgKWyMeuGva5fJclQAAAAAAAVURFVVwidVLffw/wC9Z/OQC4ChJolXCSsVV/fIVgAAAAAAAAAau/ajtOmKBK681aUtMr0Ykisc5OJUVceqi+ClOntTWfVdudcLJWelUrZViWTu3s9dERVTDkReqAbYAAAAAAAAGDd7zQWG3PuFzqO4pY1RHScDnYz5NRVNdprWuntXuq0sNxSs9E4O+xC9nDxZ4faamc8K8vADfg19LfbTW3WqtdLcaaavpUzPTskRXxp5p8U+ZsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHL7f2UUNw19qPUOpKGKqiqKhEoYJHcTeHhTL3Ii887Ii8sKuN0OoAD537NtJ2C7dqOuLdX2mlqKOkqZWU8L2erEiTOaiN8NkRDeS9nn3E6Z7TGRRo6z1tFHJQq96OVOFsiq1evqq5MKvPbrko7Jf8AbH2hfxub/wDsPOkdo/8As21H/wB3zf1VAgugO0TSmk+zHT1JebvHBUuievcsY+R7UWV+6oxFx8TqtrutDe7ZBcbbVR1NHO3ijljXZycl9yoqKiou6Khzrswslsl7D4Y3UUOK+lnWqVGJmVeJ7cqvVURERPDCEW7ObpWWz9jtqCspHvbUU8lSkLm848sZunuVyqB064dpWkrZV1FNUXXifTO4ah0FPLMyFfB72NVrfipJKKupblRQ1tFUR1FNM3ijlicjmuTxRUOOdmMepU7LqOktmnLPVW+rZN3ks1e5jpuJ7mu4mpGvhw8+SIS7sk0pe9G6TmtN7kge9Kt0sCQyK9Gsc1u26Jj1kcvxA2naDp21ag0hcUudGyd1LSzSwPXKOiejFVHNVPchzTsQ0bpu/aBkrLtZaOsqErZGd7NGjncKNbhM+G6nX9UftSvP8Rn/AOW44j2TaLm1P2Y1ndalvVtWSqljSGlnRsKrwt3c3hyuc74cmQJJpfT+ndJ6ordf2ishi0fVW10fFh/3qVZmIuGqmeDLF38/DBK3drOhm2x1wXUEPo7Ze5z3UnE52EVURvDxKmFTdExuXuzO01ll7OLTbLlTrDVQskbLE/fGZHL9aKnzOadi+k7Hf9MapprlbopmT1y07l3a5I28LmtRU3TDt9vBAOv1mrbBb7DBe6q6QRW6oa10Myqv3ziTKI1uMqvkiZMey6507f7i+3UFevpzG8a01RDJBIrfFGyNRVT3HJb/AA1FH25aasFnt1PPTWig/wCj6KqnVkeeB7ldxYcuUwi533YhIb5pTW2o9caa1BJbrVbn2qdqyvhrnSOli42qrfYTpxpjrxKB0C+6tsem5IIrnXJHUT57mnjjdLLJjwYxFcqeeMFVg1XY9URzPs9wZUrA7hmjVrmSRr4OY5EcnXmnRTj2lbjqCv7Zda3CgtlFX1lNKtKz0yqWHuYmvVqIzDXc0YmeX1ko09pHVMPa3Uaur6S30NJWUyw1MFNVOkVyo1ERd2pndrVA6ZV0lPX0c1JVwsmp5mLHLG9Mte1UwqKngfPmnNHaeX9kHfbFJa4JbXBTOkippU42sVUiXbPm5fmfRJwGmtK3n9kpqKlS419Bim4++oZkjk2ZDtnC7b/UgEhv3Zppm56no26VhpKC8WSrpamtgYjmsdA5yuROWOL1VVMfHmhN7p2g6Vst7bZrhd2QXBzmMSFYnquX44d0aqb5TfJHNFaUqdDap1hXXGvqai2TxU87LjXSo57kakiv43eLc88JtgjvbdHRXau0HJwx1FNVV3Cjk5SRPWLbPgqKBOI+1jQ816baY9QQOqnSd21Ujf3au8O84eH45wSysrKa30ctXWTx09NC1XySyuRrWonVVU5H+yDtlFF2dUEkNNFE6lro2Q921GoxqsflqY5Jsm3khi9tdZPVUeibLNI9KK51LXVaouOLh7tEzj+EcvwTwA6DRdpekq+upqSG6K19U7hpnzU0sUc68sMe5qNcuVRNlKYe07RtRdWWyK9sfWvl7lsKQS8XHnGPZ8SjWvZ5Q6zprRTvq5qGK2zJJGynamFbhE4fLZEwvQg/azTO0drrTvaFRxYjbMlLcEYnttwu6+KqxXplfyWgdFvevtMaduSW67XVtNVuajmxLDI5VReWMNVFL971lYdPVEFNca7hq504oqaKJ80z08UYxFdjnvjopoadKfV3aY2ub3c9u09TI2CTGWvqp0Ryqi9eGNGe5XkS7HZFvfaBrq+V/wB8uDalsDFf7UUaukThTywxifyQOk0OorJq2x18lsq46yFrHwzxuYrXMXCorXsciKnXmhBP2O3+zio/7yl/qRkl0/oGl0nc9T3eCunqH3hzpnxyIiJHu92Nue713ObdmtxqrT+x51LXUTnMqYpqhY3t5sVY404k92c/ADqVw7StJWyuno57r3k1N/pCU1PLOkP57mNVG/FTafdVY1067UDblDJamtRzqmNVe1EyibomVzlcKmMp1Il2IW6lpOyu2zQsZ3lY6WWd6Ju93eObv44RqJ8CN9kyra+0/XOn6TKWuOd0scSexG7jVMInTZce5qeAEzXtd0IlDJWJqGFYmP4FRIpONVxnZvDlU80TCeJv9PaosmqqJ1ZZLhFWQtXhfwZRzF8HNVEVPihy/sHoqVlTq6VtPEkjbisTXoxMozLvVRfDyMa2U7dN/smqigtTO7o7lSOkqIItmNXu1fnHL2m5/lr4gdPvWt9PWCuSgrq9fTVZ3i01PDJPI1v5StjaqonmuDOseobTqW3pX2avirKZV4VfGq5avgqLui+SocT7HrlqiuTUN6t9ot9fV1tdmpnq6x0L2rjKMREY71U4l8PDGxsYLDqvQ9Fr/Uk0dJRx3Clknhho6hZO5mVV9ZMtTlxOUDolx7R9KWy4T0M90WSop95201PLOkKdeNY2qjfic77FK2hbqTtGrqeRiW5Ktk0b2NXh7rjqFRUTw4STdhltpqPsuoKqJje/rpJZp5Or3JI5iZXyRqJ8zRdi0MdPrftHghjbHFHcmMYxqYRrUkqERETwA3+in9nldrq73TTFZ6ReqqJ0lUiJIjWtV7VcqcTUTd3Cq7kjvOutPWKudQVla99YxnePp6aCSd8bfFyMavCnvwc/0fGyP9khrJrGtai0SOwiY3XuVVfiqqpk0VdY7R2l6gdpOiud+1DWLitjSVrKWlVF3R0jk23/ADuSongB0awaktGqLalwstdHV03FwK5iKitdzw5FRFR
d02VOptDi/Yas7dUdoEE8ccLo69iughdmON/HOjkauEymyIm3JEO0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADCuzro23SLZo6OSuynA2skcyPnvlWoq8vIzQBxrSmge0PS+r7rf2z6anfdZHvqoXTToiK56vXhXu9sKq88k/wBeWq933StXaLJ9HpJWxuglfWyPajGOTCq3ha7K+/BJgBzrSmnNbab7Pn6eVLBNUwNWOkl9ImRiterlcsn3vOU4kxjn1x1xuzfQF90zpu46Z1D9E1VorEkcrqaWR0mXta1WqjmInDhFXOcov1dOAHJdP6Q7Q9BRz2rT1ZZLlZnyOfTpcVkZJAq+PAnLxRFXPNMZU6Bpq0V1qopn3W5SV9xqpVnqJMqkTHKiIjI2L7LERETxXmpugBHtY0moLjYp6DT7baklVFJDLJXSPajGubjLUY1cruvPBFeyzR2rtDUj7RcpLLPa3yun46eWVZmuVqJhEViIqeqnnz5nSwBiXJ1wbbpltUdNJXYTum1T3MjVc/jK1FXlnkhzrsw0VrDQ89VTXCSyVFurJ1qJnwTS96x3Dj1UViIqKqN2VUxvz5HUABz7X/Z7W6hvFt1Jp6vit+oLdhI5Jmqscrc5RrsIqpjLui5RVRfK7RWzXt7q6Vupqu2W6308jZpI7Q+VJalzVyjXOcvqszhVRN1xgngA5jfOz+/27XcusdE1tDDV1TFbW0Vcjkim5ZVFamd1RF6bpnO6oSCxWrVVXeY7vqmtpYfR2OZTW62PekWXbK+VXe27GyJyTnzJcALNYtUlHMtE2F9UjF7lszlaxX424lRFVEz4Ipx+h0D2i0PaNWazZPph1VVtVklOs0/BwKjURE+95ynC3fyOzADlustPdpurrDLZ++0zQU06p3zoKmoV72oueHKx7IvXbflyyY2tez/V9/k07DbJbHDTWJI3QOnml45JGtZniRGKiNyzbC7p4HWwBzLtH0hrLXWnaK0xfQVM1rmT1L3VEy/fU4k4Wfe/ZwqLld8528cjU3Z9X630FR2y9y0VLe6JUdBUUavfEiomPxkRcOTGfBUTng6KAOb0do7TbhQRWW93O0UtGjUjqbjQOkWrmYmM8OURrXKmUV2Ns5RDadplJbKjsvvVNXSr3MVPiNyuV7++bju0yq5Vyu4U8Vz5k0Of2vsks1t1TU3pa64VMc1Wtb6DNIiwpPlVR6oiesrVcvDnl5gbXs40v9yOhrdbJGolUrO+ql8ZXbqnw2b7moRWs7PtS6c11Wan0PV2/u7hlay31/E1jnKuVVqtTxyvTGV5ouDqoAg0Fm1q6mrrpWV1tlvc8Po9PRNlljoaeNVyrlwiue/zVOmEwhqezbQF90xp65aa1D9EVdnrEkcq00siyK57WtVqo5jU4eFF3zlF+rp4A5jpzTGt9B2+osllW03W1rI59FLWTvhkp+LdUe1rVRyZ32VN1XlnCbrs+0Iuj6evq66rbXXq5zd/W1LW4arsqvC3yy5Vz1zyTZEmhiXOmqqy3TU9HXvoKh6IjKmONr3R7pnDXIqLtlN06gcL7JZNU09z1XNY6a3VlItwc2Wnqp3Qua/LsOa5GuymOaL4Jg6Ho/Qlbb9UXLV2o6qnqr9XJwI2mR3c00eycLVduq4a1M4Tl5qq2tH9mM2jLnLVUOqK+WGpk7yqp5YY1bOu/NcZRd+aYOggcpZoDVOjdV3G7aHqrbJb7k7vKi23DiajXZVfUVqdMrjdMIuMLhCT2nTd3uDbjU6xrIamavplpFoKJz20sMK80RHbueud3LunJMIS8Aco01pDtB0K2ezWOtslfZXyOkp5Lh3jZIM88tYm/uRd139XKnuhdCaz0fqm9XCaustbS3adZahyrI2Vyo56tcjUbwtVeNcplUTPPY6sAOU2DRWuLZ2n12rqp2n3RXFEhqoIqiZVZFlm7MxplyIxOey78umPY9B640dq2+1On6myTW+7zd6sld3ivj9Zyp6reapxuTnhduR14Acu0RoXVejNaXeqWstlwtd2lSaqqJOKOfiTjXKMROFF4nu2zjHhyOogAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHJ5u0zUH0lPSU1vopVZI5rWtikc5URV8HFxvaBq9XIi2OLCr/ANll/tHv/d2b5fd5/wAVjdUAB4HoAAAAOadoep7zZdQU9Nbq10ELqVsitRjVy5XvTO6L0RDbp8Fs9+yrPJkjHXul0sEM1zbdTV81CtimlbEzPeNim7pUdlMKq5TKf46kupWzMpIW1D0fOkbUkc1MI52N1T4ktjitK27onft8Fi0zaY14XQCO64uVXadLVFXQzLDO17Ea9ERcZciLzOcdJyXike62tFazaUiBFtAXWuvOnHVVwnWaZJ3M4laibIibbIniSkuXHOO80n2KWi1YtAADN0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8VcIqnJKbtN1LWSrHS2yjneicStigkcqJ44Rx6MHTZM+5p7M8mWuPXd7uuA5hQdqNbBcG098tscMaqiPdE1zHR+atcq5OmQyxzwxzRPR8cjUcxycnIqZRSZumyYdd8eTHlrk/hVgAwaAAAAAAAAAAAAEd1lZq+82ZGWypfDVwv7xqNkVneJhUVuUX7fA7x1i1or
M6+bm0zEbiNpEDmNRR66v8VDbainWgip1RJaps2FftjiXDvW28Oq/LpUESQQRxI5zkY1Gorlyq4TG5pmwxiiPzRM/JzS839tLgAMGgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4Lar2zT+sp7jJC6ZrJZW8DXYVc5QnNH2qUtXWwUyWuZqzSNjRyyptlcZ5ELsNwoLXriWquWPRmyTI7LOPdcomx0FuutHI9qtVqORdlSkXZfkfe6ulbWjeObceY2+dgtMRP5ojlG+1WonhvlE2KaRiLTZw1yp+MphX3S1+Wy/dJX17ZJeFsjoUVeKNi4RMLy2ym32mT2tf69of4t/wCpSb6s/aBW/wAWb9qHFM1sWLD2+/8A26tSL3yb9kc09q+sg7OrhW1Eiz1VE/uonv3VeLCNz44VV+CEasGn77rKWouS3J0fdv4e/le5VV+M4THLGU92UwZembbNdezq/U1O1Xzd8yRjU5uVuFwnnhFKdEa3pdNUNTQ19PO+N0qysdCiKqLhEVFRVTwQ17ZpGWcEfm3/AE4cbi3ZGSeNMFk14g19RUt0qpXVMVZBFIqPVUciK1EXzymF887mw7V/21Uv8SZ/Xeatbm+89o1HcXwuh7+ugc1juaNy1G/UiG07V/21Uv8AEmf13mkRMdRj3Gp7XEz/AIdtfFsO1iomhr7akU0jEWJ+Ua5Uzuhf13fLjbtPWWmpJ5IW1VPmWVi4c7DW7Z5pz3MTtc/1hbP4J/2obrUl1slJp+00d8ttRVQzU7HxviRPVcjUzheJFRd0+Z5ceox4J7d+eG1/4snOvCK0OlZqyhp66yalhnuUiNdJAkvdvavXfizlPNEJRqtLm3sxe28cC1zXsbI5ioqO9dMLt5YIRfrXpmC2R11lvEskrlT/ADWVMvTPPdETGPP5m+lrK2t7HZZK173q2oayJ71yrmI9uN+uFynwNclbWtS++O6PMalxWYiLV+Xx3DV6a0xeNT2KSOGuZT0EEruGN2cSSKiKuUTyxuvw6mx7ObtX0eppLJVTPdE5Hs7tzuJGPZ4eHJSSdlv7Un/xp/2NInpj/a1N/Gar7HkvknJ62O0cRHC1r2enaPMuxAA+C+iAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8d7K+44p2a19HbtSTzVtTFTxLSOaj5Xo1FXiYuMr7lO1u9lfccI0Rp+k1JepaOskmZGyndKiwuRFyjmp1Rdt1PqdDFZw5e/xx/wAvJ1G++mvLcdpl7tV3qaFlvmZUSQI/vJWcsLjDc9eSr5ZL+qbZW0uhLBWo+aOWnibFM1HKio1yZbn3Yx8SXWrs7sNqq2VTY56iWNeJnpD0cjV8cIiIvxN3fba28WOst7sZmiVGqvR3Nq/BURS/jMdJx0x77az5n5p6Frd1reZRe3ajVvZW64ukX0inp3U/FnKo9PUaq+e7V+JHtCVE1ssd61HUvklbBH3ULXvVUc7ZVT5qxM+8h7btPT6eqbI5HNa+qbMqeCoioqL8eH5HVnaclh7Ln2mKNVqVpu9c1E3WTKPVvvymDfNjpgrNZ/12/ozpackxMf6Y/qg1ms161/V1VVVXJzWRKmXyZciOXdGtamyISHSUeqLBqZbXWw1dRbXOVjpVY58bdvVc1y8k5Z9/ihrOzrVVuscFZR3KVYGSPSWOTgVyKuMKi4RV6J9ZJrTr9981Q210NvR1Krnf5w56ovAie1jG2envQdTObd6RSOyI+3zgxenqtu78yJ3q53XWesHWejqXRUqSuijYjlRnC3OXuxz5Kv1F2fTmqdG3GnltMtRXRLlVSCNytXxa9m/P/HIwbfVJpDtGlfXtc2Fk0jHqiZXgdnDvdui+4l157T6KmqIYbPB9IcSes5eJiIvRERUyqnd/VrNceGkTSY/T7ua9kxNrzq22q7Uayfis8kbpoO8he5WZVqpnh2VPFDFv+p6u801r07ZXvkesUSTPjdvJJwp6ufBOar4+4v8Aas6Rz7M6ZiMlWF6vai5RrvVymepo6u31uirlarxSKr4ZomTRvcm2VanGx3zX4L4oXp6UnDjmf4udfUy2tF7fDjae1lkXTnZzcYfSJJKt0PHNNxru7KbJ4InI0+g74y06Qu1xrZXyNhmTha52Vc5Wphqe9SQ3y70187OK6vpHZjkg3avNjsplq+aHLdM26r1DWwWRj3No+9WonVPxURERV9+Nk83GOCnq4L+tx+bn9HeS3Zkr2fDhfsd3r7jregqKipkV89Yxz2o5eHd3JE8PI3ms6mePtIpo2TyNZxQeqj1ROaGNWwRUva5T08DEZFHVU7GMTk1EaxEQt9osz6fXSzx4442RPbnxTdD0xq+asxGt1Zc1xzv2ltu0zVD3VTLLRTOa2FUfUPY7GXdG5Tw5r5qngb/swlkm0rI6WRz3elPTLlyvstIitgkpezq5Xuuy6ur3RuRz+aMWRq597l3+RK+yv9qcn8bf/VaeTqK0r0k0p7Trfxn3bYptObdveE3AB8d7gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABGpdAaYmlfLJbMve5XOXv5Eyq8/xilOz3SyLlLX/wDcS/2iTg2/E5v98/eWfpY/9sfZqLtpiz32eOe5UffyRt4Gu717cJnP4qoZ1Xb6Wut76Gpi46Z7UY5nEqZT3ouTJBx6l+I348fJ121548tLFYobFZ6yLTtOyCoeivY17nPa56JtniXryObrqpaKvmXUmlKKaq4vbWBI3Z88ovF03+07ED0Yepiu/Ur3b99zE/dnfFvXbOtOP2O33PV2to76+jWmo45o5Vdj1URmOFrc+0vqpnH1HSLtpWy3yrZVXGi7+ZjEja7vXtw1FVcYaqJzVTcAmbq73tE1/LqNRophrWJiedtTd9NWi+yRSXKk790SK1i949uEX81UMmrtFvrrc2gqqVk1K1qNax+/DhMJheaL58zNBh6l9RG548fJp21548ovH2eaYjm7xLcrt8o10z1RPhnf4m7rrRQXG2/R1VTNdSeqndNVWImOWOHGORmg6tmyWmJtaZ180jHWI1EMK1WihstItLb4O5gVyv4eNzt165VVXoYdLpWy0V3ddaei4K1znPWXvXru7PFsq46r0NyDn1L8zuefPzXsrxx4AAcOgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA01p0rZbHVOqrdRdxM5ixq7vXuy1VRcYcqpzRDcg6i9qxMRPEpNYmdzAaPVWok0xaWVy0y1HFKkSMR/BuqKuc4XwN4BSaxaJtG4LRMxqJ04ppqw1uqdUrcp6RYqFahaiZytwxcrxcDc888vcdrAN+p6mc9omY1EeIZ4sUY4R246H09c6l1TPQI2Z65c6J
7mcS+Koi4z5mxtNhtdjjey20jIEf7aoquc73qqqpsQZTmyWr2zadfV3FKxO4jlqrvpu031GrcaNkr2JhsiKrXInhlN8eRjWvRlhs9S2ppKFO/b7Mkj3PVPdlcJ7zfARmyRXsi06+p2Vmd65aq76btN+fE65UnfuiRUYvePbjPP2VTwLtZZLdcLWy21VK2WkYjUbGrlTh4eWFRc/WbAE9S8REbnjx8jtrzx5aWl0pZaK31VBT0asparHfR99IqOx73bfDBetGnbVYe9+jaRIFlxxrxucq45buVfE2gLOXJMTE2nn5kUrHiGnl0tZp7yl3ko+KvR7ZEl716es1ERFxnHROhTctJWO713ptfQpNUYROJZXpsnLZFRDdARmyRMTFp4+Z2V+DEuFso7rQPoayFJKZ+OKNHK3kqKm6Ki80QotVoobJSLS26DuYVer1bxuduuN8qqr0Qzgc99u3t3wvbG965AAcqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH6hv8AdZdQw6b093TKx0fe1FTK3ibC33b78uaLzTx2po6nVtmvlJSXVW3agql4PSaenw6BeiuRqYRN+vvzsBMgeKqNRVVcIm6qc+ortqvWM1TWWSsp7Za4nrHEssSPfKqdVyi46eGM43woHQgRXSuoa6trq6yXqOOO7UOFcsfsysXGHJ80/nJsnI1q3fUep77X0tgq4bfQUD+6dUSRI9ZX9UTKKmNvljxwBPARTSWoLhW1tfZb0yNt0oFTifGmGysX8bHyXps5NjAS56m1PdLgyyVdPbbfRTLAk0kaSOmenPmi7cvmnMCdAiukb/ca6suVnvLIkuFvciOki2bK1c74+XzTYkNwrorbbqmunz3VPG6R2OaoiZwgGSDndLWa6vNpffqSrpaeF2ZILesKOWRidOJUzlcbb7+WSSWLVMF10mt7mb3SQxvWoY3fhViZdj4bp7wJADndFW641BbpL3QVdLR06q51NQrCjlkai9XKmd8YzlM+SEj03qiK9aYddqhiQup0elU1EXDHMTK48sYX44AkIOd0FfrXU9HNeLbWU1BScTkpqV8TXLKiL1cqL7s+KLyJLpDULtSWRKmaHuaqKRYaiNEVER6Y5Z3xhU93LoBvwYtVcaOiqKaCpqGRy1LuCFrub18E+aGUAANczUFmlqkpY7tQunVeFI21DVcq+GM8/IDYgsVFZS0ixJU1MMKyvRkaSPRvG5eSJnmvkYFRqS0RUdXNHc6J607fWTv24R2Fw1VzzXC7AbYEc0tqeLUlmjkWelhuD2vV1PHIjnRojlRHK1Vzjku/iX9KNqW2X/OrzDdnrK5UqYXI5uPycpzx+nAG8Brk1BZnVfoqXahWozw936Q3iz4Yzz8jS63uVZbWWZaOofD31wjik4fxmrnKKBKwWaqrpqGBZ6uoighTnJK9GtT4qWqG6UFzY51BW09Sjfa7mRHcPvxyAywYdddbdbOH06upqbj9lJpUYrvdldy/TVNPWQNnpZ4p4XezJE9HNX3KgF0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGm1TffudsE9e2JJZUVGRRryc5VwmfJN1+BFq+o11Y7X9N1dfRVEcfC+ehSFE4GqqZRHImVVM77/MDoQMW3VrLlbKWujRWsqImyoi80ymcGn1jqKXT9si9DhSevq5Uhpo1TKK5euOvTbxVAJEDntdctX6Sjprnd62nuVA56MqYo4UasOeqKiJnwyv6SQas1KthsUdVSRpUVNS9sVKzCqjnOTKLhN1THT3ASIHPK+4az0rSwXe6VlNcKPja2pp2RNasSL4OREz4Z8VTZSR6m1PHZNOsuVOxKiWp4WUrMLh7nJlM43xjf6uoNpACBSs7QaChS5urKSse1EfJbmQJnH5LVRMqqe/5k0oKp1bb6epfBJTvljRzoZWqjmKqboqL4AZII7rHUUun7ZF6HCk9fVypDTRqmUVy9cdem3iqEfrrlq/SUdNc7vW09yoHPRlTFHCjVhz1RURM+GV/SB0IEd1ZqVbDYo6qkjSoqal7YqVmFVHOcmUXCbqmOnuI9X3DWelaWC73SsprhR8bW1NOyJrViRfByImfDPiqbKB0MEe1Pqdlk02250zEnkqFaylaqLhznJlFXrjCKv1dSOV1drfTdBFerhV01dTI5vpNG2JGrEir0ciJy5Z3wvigHRAWaWpiraOCqhXMU0bZGL4tVMp9SluK40k9fUUMU7HVVOjVliTmxFTKZ+AGUAUySMhjdJK9rGNTLnOXCIniqgVAwKO+Wm4TLDR3KkqJfyIpmud8kUpm1BZaaZ8M93oIpWLhzH1LGuavgqKuwGxBh0d3tlwkdHRXGkqZGpxK2Gdr1RPHCKWpdQWaGqWmlu1CydFwsbqhqORfBUzzA2IMWsuVDbmsdXVtNTNeuGrPK1iO92V3MVNTWFVREvdtVV5IlXH+sDaAxa25UNtiSSurIKZjlw1ZpEblfLPMro66kuEHf0dTDURZxxxPRyZ8MoBfBFK5l2k1DJcdPXKkrGMjWnqqCaocrI3ovNEblGu23zhdl8dsvS8c1GyqpLjeIa66vldPPEybi7hFxhqNVco34JzAkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADlclFcbp2q3qgpa+ShSSNj55otpO7a1mGtXplVb8vguxqY7jofUFq4LtV11rr5kp5Iqt/G6NVVPWRfjnbw8zcag01cJr1DfrDVRU1yjZ3cjJkXgmZ4Ox/jlywYdNpq+3q+0lz1PPStioncdPSUmeHjyi5XPuTqvLp1qJs5qOarXJlFTCopzu1N1HodKi2RWR92t7pVfTTQyI1Uz0cmFx06c88ze2W63C5a1vsDp0W2USMiji4G/hFRMrxYyvsu69TXsserNP1VSyw1dFVW+eRXsirlcroVXwVOnLr8PENTphtyk7Va2ouUccVXLRLJNDGuUiReBGtVeq4Rptuy7iSw3Bsn4dtxk7387habPSmmZ7M+suFyqW1V1rncU8rU9Vqfkt8vlyTbY1s2nL/Y77W3HTMtG+CuXjmpqviw1+c5THvXqnPqB5Set2xV/dcm21qTe/LMfoN7qbUlPpy3pI5qzVcy8FNTN3dK/wB3humV/SqGHpTTVTaJq25XSpZU3WudxSvZ7LE/Jb/joidDRTaU1e7VM18bV2iWbKtgSdXuSJmdkanDsuPtXxA3mjrDV22KquV1fx3W4vSWdE5Rp0Ynuyv2dC5r5JHaHuiRe13bVX83ibn6slyzN1PTzzSX+otj6VsSq30VHcSOym65RNsZMLSE9bqfSE8t7k75la+RjURqNxF7ONk8UduBudNKxdK2hWez6FDj+Yhz2zJI/s11U6mXEK1Uyx/m4bxf+U2kGndaWu2y2K311vfb3cTY6mTiSWJjuaJjku6+PPZUJRZtN0lo00ll
/DROjc2dypjvFd7S+XPHuwB7pJWLpC0LH7PokaL7+FM/XkhNmSR+j9bup1+8OqKnu/dw+t/5cGdTad1lZKGazWmtoJLe9XdzPMrklha7njGyLz8fgSbT+mqWxadS07TNeju/cqY7xzkw7bwxt7kAt6JWN2i7Ssfs9wiL7+v15I7pGpnpJdYVdNTPqmMuD3QwRru93E7KJ8Fae0undYafp6i1WSsoZbfI5ywy1CuSSBF8MbZ+C774TJJtLaei01ZWULJO9lc5ZJpcY43rzX3bInwAgOo9SXOrvun55tN1lNJT1DnRxPdvOvq7N9Xy+snlgvdfd5J21ljqbakaIrXTLnjznZNk5Hl8sMl2u9lrWTsjbb51lc1yKqvRcbJ8jegavUb6Fmnq1blUyU9GsfDLJEuHYVcYTZeecfE5befoSTSb22zSdziaxjXRXGWDh6p6yvTOUVPhv0On6msiaisFTbe97p0mFY/GURyKiplPDbHxIrWaa1ndrGtorrjbI6dkaNRYUdxTK3HCjlxsmyLsnTkIJYerGvumltHNnldx1UtOj5EX1suYmV9+5K6nS9jobBWwQWumbH3KuXLOJVVrV4VVV3VUyu/mYdw0tXVVp0zSMlp0ktckD51c52HIxqIvDtvy2zglU8LainkhfnhkYrFx4KmAIb2a2+jZpCkr2UsLauRJWPnRicbk7xdlXnjZPkRm3XCot3YxUS0z3slfULEj2c2o5yIv1ZT4ku0hYL7p1H26qq6OotTUcsKsRySo5VRd0xhE9rqp5ZdGOg0PNp66yRv71znK+ncqom6K1UyiboqIvIDyLs80/Lp2OiWlYkrok/ztv4Tjx7Wff05GBrKkfQWnTFJJUPqHQ3GFnevT1nImcZ+B62wa5S2pZvpe3tokb3aVSI/vu75Y5c8f+5s7zpSoq7XZKGjqGuS31McsklQ9eJ6Nzlcoi7qq+4CPaxqvS9fUdBVW+tuNDS03feh0zc8b1VfWVOqck+HmpZpWTRa1tVdZtM3S1wvcsNY10CtjcxVREXCbJjOV9yEq1JputrrnSXqy1bKa60ze7++57uSPf1V2XxXp19ylFutWqaq8QV18ukEVPAi8NJQOc1si/v8APNPLf4bgaG9UFRbda112uenpL3bqiNrYnRtSRYEREz6i+79OeZu9Cu0/JFXzWCSoY2WRHTUk23cO3xhvTPvXl5FVda9V0l6qK2zXOmnpajCrS16vVsS/vcdOfh8cF/S2nKq01VwuVzqYp7jXvR0vctxGxEzhE8ef+OahJQARQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIFqKvqNX3N2l7O/FKxyLcaxN2sRF9hF6rlPiqY5IpJtS0d1r7JLS2epipqqVUassiqmGdcKiKqKRWzad1rYbe2it89hjiReJVVJFc5fFV4d1Kid0tNFRUcNLA3hhhjbGxuc4aiYQhutdtXaQdJ+B9Lcn8rLOH6yXW5ta23wpcXQurEb99dDngVfLO5q9WabTUtpbAybuKqF6S0835Lk8cb4X9S9CKxu0NWJoS595y4WY9/eNx9ZHb22VkPZ8s/sNlgSXP5eI8fpMup05qvUq0tFqGpoYrdA9Hy+i8XHOqePTx8OfIkOqNNxajsnoKP7iWJySU8iJsxyJhPhhVT/wBiot66WNuiLqsvs9zhPfxJj68EG1U+tg0tomSLHfMbG5mUynGjWcGUN1Vad1dqKKntt9q6GK3RPa6Z9Nxd5Pjxzt9idcLgkuotN01+sX0bxdwsfC6nkame6c3ZMJ4Y2AjN20teLTaJ7vTaouMlwpo1nlSSTMT0amVRG9E54RcoS3Tt1W96eori5qNfPHl6N5I5NnY8sopFKqza6utClorq+3R0bkRk1VFxLJIzqnL9WfEmlst0FptlPQUyKkMDEY3PNfNfNeYES1rtq7SDpPwPpbk/lZZw/WbHtDViaEufecuFmPf3jcfWZOrNNpqW0tgZN3FVC9Jaeb8lyeON8L+pehH6nTmq9SrS0Woamhit0D0fL6Lxcc6p49PHw58gMS9tlZD2fLP7DZYElz+XiPH6SUa6WNuiLqsvs9zhPfxJj68FzVGm4tR2T0FH9xLE5JKeRE2Y5Ewnwwqp/wCxHKrTurtRRU9tvtXQxW6J7XTPpuLvJ8eOdvsTrhcAYV5SRmntBuqF+8Nnpu9/mtx9WSY6xWNujbusns+jPRPfjb68Huo9N09/0+trykPBwugeibRuamE28MZT3KRmp07rG/UkFovNbQR29jm99NTq5ZZkTlnO2fgm++4GdbbtcbPoqxOgs9RcnyU7cpCuOBuEVudl6KnyI3bdS3SHXF6rGaarJZ6iOJJKZrvWh4WtRFX1evM6nBBHS08VPC1GRRMRjGp0aiYRDT0Fiko9WXa8umY6OuZE1saIuW8LUTdfgBn2mtnuNsiqqmikopn8XFTyrlzMKqb7Jzxn4kT7QFfW3DT9jdK+Okr6pUqOFccTWq3b/wA3zwTk0Gq9OfdDQQpDULTV1LIk1NOn4rk8fLZPdhPcRWJcNAWeobTPt8f0ZVUz0fHPTJ623Rc8/eu5re0iz22LSldXx0FM2sWSNVnbEiPVVemd+e5W+wavvUlNBe7pRw0MT0dIlCrmyTY8VwmP8bG81fZai/6bqLbSPiZNI5itdKqo3ZyL0RV6eBUKa1We1WaWqjpoKBHUi99UQRox7W8OVXKJ8fehz2VNOP03VQ2vSl1q2d29WXGSD8ZM+tx+CL0x05HTq+1NuOn5rXM/hSWDule3fhXGM+e5EIdNaySyLYX3K2x29IliSVjXLK5mNm8sIi8lXnjxA2OjqOkvmhLQ66UsFYsbXtZ37Efwoj3NTGfJE+RqdDWK01c19WottLL3NykZFxxNXganJEzyQlek7RUWLTNHbap8T5oePidEqq1cvc7bKIvJfAsaWsNVZH3ZamSF/pla+oj7tVXDV5IuUTcCFV1Wy4doN1kuFlr7vDRI2GCnhj42Rbbq5PNUVU9/khn6WjqaXXEj6Cx3K22mrgXvYqiJWsbImVRU6Jyx/KU3F303d4NQvvunKuniqJ2Iyqgqc93JhERF2Tnsnh791MuxWq/suctyvt0bI5WcEdHSq5IWeaouMr/jPLARO2X5tgdq6djO9q5bq+KlhRMrJIrnYTHh1/8Acr0Nb6q2a/utPXTLNVrRtlnf4verHL8lXBtrJoWSj1hcL5cJIZWvqJJqSNiqvAr3KvE7KJuiYTbP1IbSisFVTa6uV8fJCtNVU7ImMRV40VEbzTGMeqvUCRgAigAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADFpLdSUL6h9NA2N1TKs0ypn13rzVTKAAAAAAAKJY2TRPikbxMe1WuTxReZbo6Ont9JFSUkTYoIk4WMbyRC+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
[… remainder of the base64-encoded image payload in the multimodal tool-result fixture …]
1A10EVVFTsigpe6an49Q5G5X3JuHULZN6upkm/wDpxpwM+PVTMUtuKilisgZwU8TIW/vE3X3qWXKqrld1UuOLahVtepiVarGjKhqKroXceE6pycnyyZalp24RTIjc5aqK1Uy1U6opaU8pGqkMlOu3o65YqrzjXl8uXwKUkZI5Gxua9y7IjVzlQLMsknF3cEaySeGcInvUq9Hq440dURIzPJWrlFMmeNlMxIWORz19aVydXL0MOWRY43ORfZ3VPHAFpxG9STeg1Nsub0VYKaZzZcJnDXt4c/Ak1Q1Y5XNXoYFY6BtNItSsaQ49fvMcOPPIEfpquG5Xea5QuzR08HcpK5MI5yrxOVM9EREKrG5rrJScK5ajMIvki4LVNOy+SKlOxGWmB3CjUTHfOTfl0anh1Llkej7RCqdFenycqATPQycWpmL4RPX7DqBzbQEDn3yWbbhjgVF33yqpjb4KdJEqAAgAAAAAAAAAAAAAAAAEM7RLFcr7b6KK203fvjlVz042twmP3yoTMGmHLOK8Xr5hzekXrNZabSlDU2zS9BR1cfd1ETFR7OJFwvEq802LmpaOe4abuFJSx95PLCrWNyiZX3rsbUD1J9T1Pfeztjt7UK7OrDcrFRV0dypu4fLI1zE7xrsoifvVUwu0DR1xvNfBc7W1ssrY0jfFxo1dlVUciquOv1IdCBtHV5IzTmjz/RnOGs09P2cnuli17qK2o24ta5sLkVlPxRtV7uXEuFRNkzzX3IbWPTN4b2Xy2daT/P3S8SQ94zl3iLzzjl5nQwdz115iIisRETviEjp67mdzzGkV0BZ6+yaekpbjB3My1Dno3ja71Va1M5aqp0U0Fm0reqTtIlu09FwUK1NQ9Je9YvquR/CuEXO+U6HSQcfi7917aj83lfRrqsfByzWGkbja7vPqSzytbG1y1D8PRronc3KmdlRd9vPGDVW+26g7RKqKorqxi0sCqx0q8KcHJVwxMLldt8Y89jqeooq6ayzRW+kpquZ+GrDU+w5vXO6faRXRGjrnZ71UXSvSCnbJG5jaeF+UTKovnsmNt1Pbi6v/AAJtaY7o4ifdhfD/AImo3qfPwWdc6HrbhPSVlmja7uIGwLBxI1URueFUVVxyXHwQ1l0sWvdRW1G3FrXNhcisp+KNqvdy4lwqJsmea+5DrAPLTr8lKxGonXiZjlrbp6zMzueXPY9M3dvZdLZlpP8ApB0vEkPeM5d4i8845J4l/TmlrrBoestVRJJbq6SoWSKSOVFVuzcbsXkuFQnYOZ6zJMTHHM7/AFdRgrExPy05ZTU/aPao5aKON1UyRV4ZZJGS46ZRXLlE8l+RuNA6Lq7BNNcLi5ramWPumwsXi4G5RVVV5Z2TkTsFydbe9ZrERG/Oo8pXBWJidzOgAHjbgAAAAAAAAAAAAAAAAAAAAAAAAAA//9k=", + }, + ], + tool_failed: false, }, ]; @@ -981,13 +923,10 @@ export const CHAT_WITH_KNOWLEDGE_TOOL: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01QjezACFfkEe4Yfid2AgdPh", - content: - '🗃️110c57fd71\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere\'s your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don\'t have direct access to the command line. Look if there\'s a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. 
You really need to cat() designs and sketches if they are present in the task.\n\nIf you don\'t see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON\'T DO STUPID THINGS:\n* DON\'T SKIP MAKING SCREENSHOTS\n* DON\'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON\'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n\n🗃️019957b6ff\nAdditional instructions for django web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere\'s your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and locate(), looking into relevant files using cat(). If you see reference designs and sketches, read them using cat()\n2. Start django server\n3. Navigate to the place on the website that user wants to change, make a screenshot to make sure you understand what exactly needs to change\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it.\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place.\n\nDON\'T DO STUPID THINGS:\n* DON\'T SKIP MAKING SCREENSHOTS\n* DON\'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON\'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE YOU HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n🗃️36338b63b3\n[\n["goal", "Discuss whether birds are real, their software, programming, and Python usage"],\n["thinking", "User is asking about birds and software. 
Evidence: birds are biological creatures, but there\'s research into bird-inspired algorithms and robotics."],\n["thinking", "When asked about bird programming, focused on research projects like BirdBrain, Flocking, and RoboBird that simulate or interact with birds."],\n["thinking", "When asked about Python-using birds, clarified that birds don\'t use programming languages, but Python is used by researchers to study birds."],\n["coding", "Provided example of Boid algorithm simulation in Python showing flocking behavior"],\n["coding", "Provided finite state machine simulation of bird behavior states (perched, flying, eating)"],\n["coding", "Provided bird population growth simulation using simple mathematical model"],\n["coding", "Provided example of bird song classification using RandomForestClassifier"],\n["outcome", "SUCCESS"]\n]\n\n🗃️81e825a188\n[\n["goal", "Add swim method to Frog class in frog.py"],\n["thinking", "Can add swim method directly using REWRITE_ONE_SYMBOL since the file is small and class structure is clear"],\n["coding", "📍REWRITE_ONE_SYMBOL 000 added swim(dx, dy, pond_width, pond_height) method with position updates and boundary checks"],\n["outcome", "SUCCESS"]\n]\n\n🗃️6f3566503d\nLooks like proj2 is written in fact in Rust.\n', - - tool_failed: false, - }, + tool_call_id: "toolu_01QjezACFfkEe4Yfid2AgdPh", + content: + '🗃️110c57fd71\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere\'s your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don\'t have direct access to the command line. Look if there\'s a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. You really need to cat() designs and sketches if they are present in the task.\n\nIf you don\'t see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON\'T DO STUPID THINGS:\n* DON\'T SKIP MAKING SCREENSHOTS\n* DON\'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON\'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n\n🗃️019957b6ff\nAdditional instructions for django web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere\'s your approximate web development plan:\n1. 
Investigate project to understand the task given by the user, start with calling tree() and locate(), looking into relevant files using cat(). If you see reference designs and sketches, read them using cat()\n2. Start django server\n3. Navigate to the place on the website that user wants to change, make a screenshot to make sure you understand what exactly needs to change\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it.\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place.\n\nDON\'T DO STUPID THINGS:\n* DON\'T SKIP MAKING SCREENSHOTS\n* DON\'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON\'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE YOU HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n🗃️36338b63b3\n[\n["goal", "Discuss whether birds are real, their software, programming, and Python usage"],\n["thinking", "User is asking about birds and software. Evidence: birds are biological creatures, but there\'s research into bird-inspired algorithms and robotics."],\n["thinking", "When asked about bird programming, focused on research projects like BirdBrain, Flocking, and RoboBird that simulate or interact with birds."],\n["thinking", "When asked about Python-using birds, clarified that birds don\'t use programming languages, but Python is used by researchers to study birds."],\n["coding", "Provided example of Boid algorithm simulation in Python showing flocking behavior"],\n["coding", "Provided finite state machine simulation of bird behavior states (perched, flying, eating)"],\n["coding", "Provided bird population growth simulation using simple mathematical model"],\n["coding", "Provided example of bird song classification using RandomForestClassifier"],\n["outcome", "SUCCESS"]\n]\n\n🗃️81e825a188\n[\n["goal", "Add swim method to Frog class in frog.py"],\n["thinking", "Can add swim method directly using REWRITE_ONE_SYMBOL since the file is small and class structure is clear"],\n["coding", "📍REWRITE_ONE_SYMBOL 000 added swim(dx, dy, pond_width, pond_height) method with position updates and boundary checks"],\n["outcome", "SUCCESS"]\n]\n\n🗃️6f3566503d\nLooks like proj2 is written in fact in Rust.\n', + tool_failed: false, }, { role: "assistant", @@ -1006,13 +945,10 @@ export const CHAT_WITH_KNOWLEDGE_TOOL: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01P9sbpcJDR7tDBFPDVbRuYK", - content: - '{\n "FOUND": {\n "frog.py": "Frog",\n "holiday.py": "frog.Frog",\n "work_day.py": "bring_your_own_frog"\n },\n "MORE_TOCHANGE": {\n "set_as_avatar.py": "Toad, EuropeanCommonToad",\n "jump_to_conclusions.py": "creatures",\n "holiday.py": "frog1,frog2"\n },\n "USAGE": {\n "jump_to_conclusions.py": "creatures",\n "work_day.py": "bring_your_own_frog",\n "set_as_avatar.py": "Toad, EuropeanCommonToad"\n }\n}', - - tool_failed: false, - }, + tool_call_id: "toolu_01P9sbpcJDR7tDBFPDVbRuYK", + content: + '{\n "FOUND": {\n "frog.py": "Frog",\n "holiday.py": "frog.Frog",\n "work_day.py": "bring_your_own_frog"\n },\n "MORE_TOCHANGE": {\n "set_as_avatar.py": "Toad, EuropeanCommonToad",\n "jump_to_conclusions.py": "creatures",\n "holiday.py": "frog1,frog2"\n 
},\n "USAGE": {\n "jump_to_conclusions.py": "creatures",\n "work_day.py": "bring_your_own_frog",\n "set_as_avatar.py": "Toad, EuropeanCommonToad"\n }\n}', + tool_failed: false, }, { role: "context_file", @@ -1418,13 +1354,10 @@ export const CHAT_WITH_KNOWLEDGE_TOOL: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01XrmGSBgvr3BNHw8VrNM2M5", - content: - 'AST assessment has failed: the generated diff had introduced errors into the file `"/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/jump_to_conclusions.py"`: 0 before errs < 46 after errs', - - tool_failed: false, - }, + tool_call_id: "toolu_01XrmGSBgvr3BNHw8VrNM2M5", + content: + 'AST assessment has failed: the generated diff had introduced errors into the file `"/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/jump_to_conclusions.py"`: 0 before errs < 46 after errs', + tool_failed: false, }, { role: "assistant", @@ -1504,13 +1437,10 @@ export const CHAT_WITH_KNOWLEDGE_TOOL: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01EkpiymGNGZPdzevMeTpRS9", - content: - "Nothing in STDOUT/STDERR\n\nThe command was running 0.010s, finished with exit code 0", - - tool_failed: false, - }, + tool_call_id: "toolu_01EkpiymGNGZPdzevMeTpRS9", + content: + "Nothing in STDOUT/STDERR\n\nThe command was running 0.010s, finished with exit code 0", + tool_failed: false, }, { role: "assistant", @@ -1522,7 +1452,6 @@ export const CHAT_WITH_KNOWLEDGE_TOOL: ChatThread = { model: "", tool_use: "agent", mode: "AGENT", - read: true, isTitleGenerated: true, new_chat_suggested: { wasSuggested: false, diff --git a/refact-agent/gui/src/__fixtures__/chat_config_thread.ts b/refact-agent/gui/src/__fixtures__/chat_config_thread.ts index 39e59fbb7..67fbca443 100644 --- a/refact-agent/gui/src/__fixtures__/chat_config_thread.ts +++ b/refact-agent/gui/src/__fixtures__/chat_config_thread.ts @@ -1,497 +1,475 @@ import type { Chat } from "../features/Chat/Thread"; +const THREAD_ID = "941fb8f4-409c-4430-a3b2-6450fafdb9f4"; + export const CHAT_CONFIG_THREAD: Chat = { - streaming: false, - thread: { - mode: "CONFIGURE", - id: "941fb8f4-409c-4430-a3b2-6450fafdb9f4", - messages: [ - { - role: "user", - content: - "🔧 The postgres tool should be visible now. To test the tool, list the tables available, briefly desctibe the tables and express\nsatisfaction and relief if it works, and change nothing. If it doesn't work or the tool isn't available, go through the usual plan in the system prompt.\nThe current config file is .\n", - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_IkNfXpwhNVR6D1Sr2CDA5Cfi", - function: { - arguments: - '{"query":"\\nSELECT table_name\\nFROM information_schema.tables\\nWHERE table_schema=\'public\';"}', - name: "postgres", - }, - type: "function", - index: 0, + current_thread_id: THREAD_ID, + open_thread_ids: [THREAD_ID], + threads: { + [THREAD_ID]: { + thread: { + mode: "CONFIGURE", + id: THREAD_ID, + messages: [ + { + role: "user", + content: + "🔧 The postgres tool should be visible now. To test the tool, list the tables available, briefly desctibe the tables and express\nsatisfaction and relief if it works, and change nothing. 
If it doesn't work or the tool isn't available, go through the usual plan in the system prompt.\nThe current config file is .\n", }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_IkNfXpwhNVR6D1Sr2CDA5Cfi", - content: - "🧩 for configuration go to SETTINGS:postgres, psql failed:\nNo such file or directory (os error 2)", - - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_kw6TJChemYjXEGL9mLL3T0mN", - function: { - arguments: '{"path":"/Users/marc/Projects/refact-lsp"}', - name: "tree", - }, - type: "function", - index: 0, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_IkNfXpwhNVR6D1Sr2CDA5Cfi", + function: { + arguments: + '{"query":"\\nSELECT table_name\\nFROM information_schema.tables\\nWHERE table_schema=\'public\';"}', + name: "postgres", + }, + type: "function", + index: 0, + }, + ], }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_kw6TJChemYjXEGL9mLL3T0mN", - content: - "/\n Users/\n marc/\n Projects/\n refact-lsp/\n .dockerignore\n .gitattributes\n .gitignore\n CODE_OF_CONDUCT.md\n CONTRIBUTING.md\n Cargo.lock\n Cargo.toml\n Cross.toml\n INTEGRATIONS.md\n LICENSE\n README.md\n build.rs\n tests/\n __init__.py\n lsp_connect.py\n test01_completion_edge_cases.py\n test02_completion_with_rag.py\n test03_at_commands_completion.py\n test04_completion_lsp.py\n test05_is_openai_compatible.py\n test06_tool_not_tool.py\n test07_memories.py\n test08_post_processing.py\n test09_ast_pick_up_changes.py\n test10_locate.py\n test11_patch.py\n test11_patch_partial_edit.py\n test12_tools_authorize_calls.py\n test13_vision.py\n test_diff_handlers.py\n test13_data/\n 200.jpg\n 530.jpg\n test11_data/\n already_applied_rewrite_symbol_01.py\n already_applied_rewrite_symbol_02.py\n toad_orig.py\n toad_partial_edit_01.py\n toad_partial_edit_02.py\n toad_rewrite_symbol_01.py\n toad_rewrite_symbol_02.py\n toad_rewrite_symbol_03.py\n toad_rewrite_symbol_04_orig.rs\n toad_rewrite_symbol_04_patched.rs\n emergency_frog_situation/\n frog.py\n holiday.py\n jump_to_conclusions.py\n set_as_avatar.py\n work_day.py\n src/\n background_tasks.rs\n cached_tokenizers.rs\n call_validation.rs\n caps.rs\n completion_cache.rs\n custom_error.rs\n diffs.rs\n fetch_embedding.rs\n file_filter.rs\n files_correction.rs\n files_in_jsonl.rs\n files_in_workspace.rs\n forward_to_hf_endpoint.rs\n forward_to_openai_endpoint.rs\n fuzzy_search.rs\n git.rs\n global_context.rs\n http.rs\n knowledge.rs\n known_models.rs\n lsp.rs\n main.rs\n nicer_logs.rs\n privacy.rs\n privacy_compiled_in.rs\n restream.rs\n scratchpad_abstract.rs\n subchat.rs\n version.rs\n yaml_configs/\n create_configs.rs\n customization_compiled_in.rs\n customization_loader.rs\n mod.rs\n vecdb/\n mod.rs\n vdb_cache.rs\n vdb_file_splitter.rs\n vdb_highlev.rs\n vdb_lance.rs\n vdb_remote.rs\n vdb_structs.rs\n vdb_thread.rs\n tools/\n mod.rs\n tool_ast_definition.rs\n tool_ast_reference.rs\n tool_cat.rs\n tool_cmdline.rs\n tool_deep_thinking.rs\n tool_knowledge.rs\n tool_locate_search.rs\n tool_patch.rs\n tool_relevant_files.rs\n tool_search.rs\n tool_tree.rs\n tool_web.rs\n tools_description.rs\n tools_execute.rs\n tool_patch_aux/\n ast_lint.rs\n diff_apply.rs\n diff_structs.rs\n fs_utils.rs\n mod.rs\n no_model_edit.rs\n postprocessing_utils.rs\n tickets_parsing.rs\n model_based_edit/\n blocks_of_code_parser.rs\n mod.rs\n model_execution.rs\n partial_edit.rs\n whole_file_parser.rs\n telemetry/\n basic_comp_counters.rs\n basic_network.rs\n basic_robot_human.rs\n 
basic_transmit.rs\n mod.rs\n snippets_collection.rs\n snippets_transmit.rs\n telemetry_structs.rs\n utils.rs\n scratchpads/\n chat_generic.rs\n chat_llama2.rs\n chat_passthrough.rs\n chat_utils_deltadelta.rs\n chat_utils_limit_history.rs\n chat_utils_prompts.rs\n code_completion_fim.rs\n code_completion_replace.rs\n comments_parser.rs\n mod.rs\n multimodality.rs\n passthrough_convert_messages.rs\n scratchpad_utils.rs\n postprocessing/\n mod.rs\n pp_command_output.rs\n pp_context_files.rs\n pp_plain_text.rs\n pp_utils.rs\n integrations/\n config_chat.rs\n integr_abstract.rs\n integr_chrome.rs\n integr_github.rs\n integr_gitlab.rs\n integr_pdb.rs\n integr_postgres.rs\n mod.rs\n process_io_utils.rs\n running_integrations.rs\n sessions.rs\n setting_up_integrations.rs\n yaml_schema.rs\n docker/\n docker_container_manager.rs\n docker_ssh_tunnel_utils.rs\n integr_docker.rs\n mod.rs\n http/\n routers.rs\n utils.rs\n routers/\n info.rs\n v1.rs\n v1/\n ast.rs\n at_commands.rs\n at_tools.rs\n caps.rs\n chat.rs\n code_completion.rs\n code_lens.rs\n customization.rs\n dashboard.rs\n docker.rs\n git.rs\n graceful_shutdown.rs\n gui_help_handlers.rs\n handlers_memdb.rs\n links.rs\n lsp_like_handlers.rs\n patch.rs\n snippet_accepted.rs\n status.rs\n subchat.rs\n sync_files.rs\n system_prompt.rs\n telemetry_network.rs\n v1_integrations.rs\n vecdb.rs\n dashboard/\n dashboard.rs\n mod.rs\n structs.rs\n utils.rs\n at_commands/\n at_ast_definition.rs\n at_ast_reference.rs\n at_commands.rs\n at_file.rs\n at_search.rs\n at_tree.rs\n at_web.rs\n execute_at.rs\n mod.rs\n ast/\n ast_db.rs\n ast_indexer_thread.rs\n ast_parse_anything.rs\n ast_structs.rs\n chunk_utils.rs\n dummy_tokenizer.json\n file_splitter.rs\n linters.rs\n mod.rs\n parse_common.rs\n parse_python.rs\n treesitter/\n ast_instance_structs.rs\n file_ast_markup.rs\n language_id.rs\n mod.rs\n parsers.rs\n skeletonizer.rs\n structs.rs\n parsers/\n cpp.rs\n java.rs\n js.rs\n python.rs\n rust.rs\n tests.rs\n ts.rs\n utils.rs\n tests/\n cpp.rs\n java.rs\n js.rs\n python.rs\n rust.rs\n ts.rs\n cases/\n ts/\n main.ts\n main.ts.json\n person.ts\n person.ts.decl_json\n person.ts.skeleton\n rust/\n main.rs\n main.rs.json\n point.rs\n point.rs.decl_json\n point.rs.skeleton\n python/\n calculator.py\n calculator.py.decl_json\n calculator.py.skeleton\n main.py\n main.py.json\n js/\n car.js\n car.js.decl_json\n car.js.skeleton\n main.js\n main.js.json\n java/\n main.java\n main.java.json\n person.java\n person.java.decl_json\n person.java.skeleton\n cpp/\n circle.cpp\n circle.cpp.decl_json\n circle.cpp.skeleton\n main.cpp\n main.cpp.json\n alt_testsuite/\n cpp_goat_library.correct\n cpp_goat_library.h\n cpp_goat_main.correct\n cpp_goat_main.cpp\n jump_to_conclusions_annotated.py\n py_goat_library.correct\n py_goat_library.py\n py_goat_library_annotated.py\n py_goat_main.py\n py_goat_main_annotated.py\n py_torture1_attr.py\n py_torture1_attr_annotated.py\n py_torture2_resolving.py\n py_torture2_resolving_annotated.py\n python_binding_and_cmdline/\n setup.py\n refact/\n __init__.py\n chat_client.py\n cli_app_switcher.py\n cli_export.py\n cli_inspect.py\n cli_main.py\n cli_markdown.py\n cli_printing.py\n cli_settings.py\n cli_statusbar.py\n cli_streaming.py\n lsp_runner.py\n traj_compressor.py\n examples/\n ast_definition.sh\n ast_references.sh\n chat_with_at_command.py\n http_caps.sh\n http_chat.sh\n http_chat_passthrough.sh\n http_completion.sh\n http_rag_status.sh\n http_subchat.sh\n http_vecdb_search.sh\n lsp_runner.py\n note3.py\n rag_skeletonize_video.py\n 
docker/\n lsp-debug.Dockerfile\n chrome/\n mac_arm-130.0.6723.69/\n chrome-mac-arm64/\n Google Chrome for Testing.app/\n Contents/\n Resources/\n com.google.chrome.for.testing.manifest/\n Contents/\n Resources/\n com.google.chrome.for.testing.manifest\n en.lproj/\n Localizable.strings\n Frameworks/\n Google Chrome for Testing Framework.framework/\n Versions/\n 130.0.6723.69/\n Libraries/\n WidevineCdm/\n _platform_specific/\n mac_arm64/\n libwidevinecdm.dylib\n bring_your_own_key/\n hf.yaml\n mixed.yaml\n openai.yaml\n openrouter.yaml", - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_4we2wH5H50A2m6CIJqKbzYH8", - function: { - arguments: '{"paths": "Cargo.toml", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 0, - }, - { - id: "call_vmIGl31ytfpLWPkc138HJnxz", - function: { - arguments: '{"paths": "README.md", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 1, - }, - { - id: "call_QD1oyHwPOvvFdYUfV3ijiKzB", - function: { - arguments: - '{"paths": "tests/emergency_frog_situation/frog.py", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 2, + { + role: "tool", + tool_call_id: "call_IkNfXpwhNVR6D1Sr2CDA5Cfi", + content: + "🧩 for configuration go to SETTINGS:postgres, psql failed:\nNo such file or directory (os error 2)", + tool_failed: false, }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_QD1oyHwPOvvFdYUfV3ijiKzB", - content: - "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py\n", - tool_failed: false, - }, - }, - { - role: "tool", - content: { - tool_call_id: "call_vmIGl31ytfpLWPkc138HJnxz", - content: - 'Path problems:\n\nThe path "README.md" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "tool", - content: { - tool_call_id: "call_4we2wH5H50A2m6CIJqKbzYH8", - content: - 'Path problems:\n\nThe path "Cargo.toml" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "context_file", - content: [ { - file_name: "frog.py", - file_content: - 'import numpy as np\n\nDT = 0.01\n\nclass Frog:\n def __init__(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n def bounce_off_banks(self, pond_width, pond_height):\n if self.x < 0:\n self.vx = np.abs(self.vx)\n elif self.x > pond_width:\n self.vx = -np.abs(self.vx)\n if self.y < 0:\n self.vy = np.abs(self.vy)\n elif self.y > pond_height:\n self.vy = -np.abs(self.vy)\n\n def jump(self, pond_width, pond_height):\n self.x += self.vx * DT\n self.y += self.vy * DT\n self.bounce_off_banks(pond_width, pond_height)\n self.x = np.clip(self.x, 0, pond_width)\n self.y = np.clip(self.y, 0, pond_height)\n\n def croak(self, n_times):\n for n in range(n_times):\n print("croak")\n\n\nclass AlternativeFrog:\n def alternative_jump(self):\n pass\n\n\ndef standalone_jumping_function():\n print("I\'m a frog! Jump! Jump! 
Really!")\n', - line1: 1, - line2: 39, + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_kw6TJChemYjXEGL9mLL3T0mN", + function: { + arguments: '{"path":"/Users/marc/Projects/refact-lsp"}', + name: "tree", + }, + type: "function", + index: 0, + }, + ], }, - ], - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_sNnvyOLlmulfHIikJzKhmz55", - function: { - arguments: '{"paths": "src/Cargo.toml", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 0, - }, - { - id: "call_n9FACVDl6a4tllgyWaLCl8s9", - function: { - arguments: '{"paths": "src/main.rs", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 1, - }, - { - id: "call_1DUkSfLq6Io8xowqVjJEuybM", - function: { - arguments: '{"paths": "src/lib.rs", "symbols": ""}', - name: "cat", - }, - type: "function", - index: 2, + { + role: "tool", + tool_call_id: "call_kw6TJChemYjXEGL9mLL3T0mN", + content: + "/\n Users/\n marc/\n Projects/\n refact-lsp/\n .dockerignore\n .gitattributes\n .gitignore\n CODE_OF_CONDUCT.md\n CONTRIBUTING.md\n Cargo.lock\n Cargo.toml\n Cross.toml\n INTEGRATIONS.md\n LICENSE\n README.md\n build.rs\n tests/\n __init__.py\n lsp_connect.py\n test01_completion_edge_cases.py\n test02_completion_with_rag.py\n test03_at_commands_completion.py\n test04_completion_lsp.py\n test05_is_openai_compatible.py\n test06_tool_not_tool.py\n test07_memories.py\n test08_post_processing.py\n test09_ast_pick_up_changes.py\n test10_locate.py\n test11_patch.py\n test11_patch_partial_edit.py\n test12_tools_authorize_calls.py\n test13_vision.py\n test_diff_handlers.py\n test13_data/\n 200.jpg\n 530.jpg\n test11_data/\n already_applied_rewrite_symbol_01.py\n already_applied_rewrite_symbol_02.py\n toad_orig.py\n toad_partial_edit_01.py\n toad_partial_edit_02.py\n toad_rewrite_symbol_01.py\n toad_rewrite_symbol_02.py\n toad_rewrite_symbol_03.py\n toad_rewrite_symbol_04_orig.rs\n toad_rewrite_symbol_04_patched.rs\n emergency_frog_situation/\n frog.py\n holiday.py\n jump_to_conclusions.py\n set_as_avatar.py\n work_day.py\n src/\n background_tasks.rs\n cached_tokenizers.rs\n call_validation.rs\n caps.rs\n completion_cache.rs\n custom_error.rs\n diffs.rs\n fetch_embedding.rs\n file_filter.rs\n files_correction.rs\n files_in_jsonl.rs\n files_in_workspace.rs\n forward_to_hf_endpoint.rs\n forward_to_openai_endpoint.rs\n fuzzy_search.rs\n git.rs\n global_context.rs\n http.rs\n knowledge.rs\n known_models.rs\n lsp.rs\n main.rs\n nicer_logs.rs\n privacy.rs\n privacy_compiled_in.rs\n restream.rs\n scratchpad_abstract.rs\n subchat.rs\n version.rs\n yaml_configs/\n create_configs.rs\n customization_compiled_in.rs\n customization_loader.rs\n mod.rs\n vecdb/\n mod.rs\n vdb_cache.rs\n vdb_file_splitter.rs\n vdb_highlev.rs\n vdb_lance.rs\n vdb_remote.rs\n vdb_structs.rs\n vdb_thread.rs\n tools/\n mod.rs\n tool_ast_definition.rs\n tool_ast_reference.rs\n tool_cat.rs\n tool_cmdline.rs\n tool_deep_thinking.rs\n tool_knowledge.rs\n tool_locate_search.rs\n tool_patch.rs\n tool_relevant_files.rs\n tool_search.rs\n tool_tree.rs\n tool_web.rs\n tools_description.rs\n tools_execute.rs\n tool_patch_aux/\n ast_lint.rs\n diff_apply.rs\n diff_structs.rs\n fs_utils.rs\n mod.rs\n no_model_edit.rs\n postprocessing_utils.rs\n tickets_parsing.rs\n model_based_edit/\n blocks_of_code_parser.rs\n mod.rs\n model_execution.rs\n partial_edit.rs\n whole_file_parser.rs\n telemetry/\n basic_comp_counters.rs\n basic_network.rs\n basic_robot_human.rs\n basic_transmit.rs\n mod.rs\n snippets_collection.rs\n snippets_transmit.rs\n 
telemetry_structs.rs\n utils.rs\n scratchpads/\n chat_generic.rs\n chat_llama2.rs\n chat_passthrough.rs\n chat_utils_deltadelta.rs\n chat_utils_limit_history.rs\n chat_utils_prompts.rs\n code_completion_fim.rs\n code_completion_replace.rs\n comments_parser.rs\n mod.rs\n multimodality.rs\n passthrough_convert_messages.rs\n scratchpad_utils.rs\n postprocessing/\n mod.rs\n pp_command_output.rs\n pp_context_files.rs\n pp_plain_text.rs\n pp_utils.rs\n integrations/\n config_chat.rs\n integr_abstract.rs\n integr_chrome.rs\n integr_github.rs\n integr_gitlab.rs\n integr_pdb.rs\n integr_postgres.rs\n mod.rs\n process_io_utils.rs\n running_integrations.rs\n sessions.rs\n setting_up_integrations.rs\n yaml_schema.rs\n docker/\n docker_container_manager.rs\n docker_ssh_tunnel_utils.rs\n integr_docker.rs\n mod.rs\n http/\n routers.rs\n utils.rs\n routers/\n info.rs\n v1.rs\n v1/\n ast.rs\n at_commands.rs\n at_tools.rs\n caps.rs\n chat.rs\n code_completion.rs\n code_lens.rs\n customization.rs\n dashboard.rs\n docker.rs\n git.rs\n graceful_shutdown.rs\n gui_help_handlers.rs\n handlers_memdb.rs\n links.rs\n lsp_like_handlers.rs\n patch.rs\n snippet_accepted.rs\n status.rs\n subchat.rs\n sync_files.rs\n system_prompt.rs\n telemetry_network.rs\n v1_integrations.rs\n vecdb.rs\n dashboard/\n dashboard.rs\n mod.rs\n structs.rs\n utils.rs\n at_commands/\n at_ast_definition.rs\n at_ast_reference.rs\n at_commands.rs\n at_file.rs\n at_search.rs\n at_tree.rs\n at_web.rs\n execute_at.rs\n mod.rs\n ast/\n ast_db.rs\n ast_indexer_thread.rs\n ast_parse_anything.rs\n ast_structs.rs\n chunk_utils.rs\n dummy_tokenizer.json\n file_splitter.rs\n linters.rs\n mod.rs\n parse_common.rs\n parse_python.rs\n treesitter/\n ast_instance_structs.rs\n file_ast_markup.rs\n language_id.rs\n mod.rs\n parsers.rs\n skeletonizer.rs\n structs.rs\n parsers/\n cpp.rs\n java.rs\n js.rs\n python.rs\n rust.rs\n tests.rs\n ts.rs\n utils.rs\n tests/\n cpp.rs\n java.rs\n js.rs\n python.rs\n rust.rs\n ts.rs\n cases/\n ts/\n main.ts\n main.ts.json\n person.ts\n person.ts.decl_json\n person.ts.skeleton\n rust/\n main.rs\n main.rs.json\n point.rs\n point.rs.decl_json\n point.rs.skeleton\n python/\n calculator.py\n calculator.py.decl_json\n calculator.py.skeleton\n main.py\n main.py.json\n js/\n car.js\n car.js.decl_json\n car.js.skeleton\n main.js\n main.js.json\n java/\n main.java\n main.java.json\n person.java\n person.java.decl_json\n person.java.skeleton\n cpp/\n circle.cpp\n circle.cpp.decl_json\n circle.cpp.skeleton\n main.cpp\n main.cpp.json\n alt_testsuite/\n cpp_goat_library.correct\n cpp_goat_library.h\n cpp_goat_main.correct\n cpp_goat_main.cpp\n jump_to_conclusions_annotated.py\n py_goat_library.correct\n py_goat_library.py\n py_goat_library_annotated.py\n py_goat_main.py\n py_goat_main_annotated.py\n py_torture1_attr.py\n py_torture1_attr_annotated.py\n py_torture2_resolving.py\n py_torture2_resolving_annotated.py\n python_binding_and_cmdline/\n setup.py\n refact/\n __init__.py\n chat_client.py\n cli_app_switcher.py\n cli_export.py\n cli_inspect.py\n cli_main.py\n cli_markdown.py\n cli_printing.py\n cli_settings.py\n cli_statusbar.py\n cli_streaming.py\n lsp_runner.py\n traj_compressor.py\n examples/\n ast_definition.sh\n ast_references.sh\n chat_with_at_command.py\n http_caps.sh\n http_chat.sh\n http_chat_passthrough.sh\n http_completion.sh\n http_rag_status.sh\n http_subchat.sh\n http_vecdb_search.sh\n lsp_runner.py\n note3.py\n rag_skeletonize_video.py\n docker/\n lsp-debug.Dockerfile\n chrome/\n mac_arm-130.0.6723.69/\n 
chrome-mac-arm64/\n Google Chrome for Testing.app/\n Contents/\n Resources/\n com.google.chrome.for.testing.manifest/\n Contents/\n Resources/\n com.google.chrome.for.testing.manifest\n en.lproj/\n Localizable.strings\n Frameworks/\n Google Chrome for Testing Framework.framework/\n Versions/\n 130.0.6723.69/\n Libraries/\n WidevineCdm/\n _platform_specific/\n mac_arm64/\n libwidevinecdm.dylib\n bring_your_own_key/\n hf.yaml\n mixed.yaml\n openai.yaml\n openrouter.yaml", + tool_failed: false, }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_1DUkSfLq6Io8xowqVjJEuybM", - content: - 'Path problems:\n\nThe path "src/lib.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "tool", - content: { - tool_call_id: "call_n9FACVDl6a4tllgyWaLCl8s9", - content: - 'Path problems:\n\nThe path "src/main.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "tool", - content: { - tool_call_id: "call_sNnvyOLlmulfHIikJzKhmz55", - content: - 'Path problems:\n\nThe path "src/Cargo.toml" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_h8biZ8JNUjeWGbL9CCatbPcf", - function: { - arguments: '{"paths":"src/background_tasks.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_4we2wH5H50A2m6CIJqKbzYH8", + function: { + arguments: '{"paths": "Cargo.toml", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 0, + }, + { + id: "call_vmIGl31ytfpLWPkc138HJnxz", + function: { + arguments: '{"paths": "README.md", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 1, + }, + { + id: "call_QD1oyHwPOvvFdYUfV3ijiKzB", + function: { + arguments: + '{"paths": "tests/emergency_frog_situation/frog.py", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 2, + }, + ], }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_h8biZ8JNUjeWGbL9CCatbPcf", - content: - 'Path problems:\n\nThe path "src/background_tasks.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_coI5PDRF2O2Mjcfb8bdZ4Heo", - function: { - arguments: - '{"paths":"src/tools/integr_postgres.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "tool", + tool_call_id: "call_QD1oyHwPOvvFdYUfV3ijiKzB", + content: + "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py\n", + tool_failed: false, }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_coI5PDRF2O2Mjcfb8bdZ4Heo", - content: - 'Path problems:\n\nThe path "src/tools/integr_postgres.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_RXDRwr3yCJEplHud4xRoxWlC", - function: { - arguments: - '{"paths":"src/integrations/integr_postgres.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "tool", + tool_call_id: "call_vmIGl31ytfpLWPkc138HJnxz", + content: + 'Path problems:\n\nThe path "README.md" does not exist. 
There are no similar names either.\n', + tool_failed: false, }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_RXDRwr3yCJEplHud4xRoxWlC", - content: - 'Path problems:\n\nThe path "src/integrations/integr_postgres.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_OvYAforNKAFz0d7mv96AnTVR", - function: { - arguments: - '{"paths":"src/integrations/integr_abstract.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "tool", + tool_call_id: "call_4we2wH5H50A2m6CIJqKbzYH8", + content: + 'Path problems:\n\nThe path "Cargo.toml" does not exist. There are no similar names either.\n', + tool_failed: false, }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_OvYAforNKAFz0d7mv96AnTVR", - content: - 'Path problems:\n\nThe path "src/integrations/integr_abstract.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_3wuMILBYl0z7pBMUoJOwohQf", - function: { - arguments: - '{"paths":"src/integrations/integr_github.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "context_file", + content: [ + { + file_name: "frog.py", + file_content: + 'import numpy as np\n\nDT = 0.01\n\nclass Frog:\n def __init__(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.vx = vx\n self.vy = vy\n\n def bounce_off_banks(self, pond_width, pond_height):\n if self.x < 0:\n self.vx = np.abs(self.vx)\n elif self.x > pond_width:\n self.vx = -np.abs(self.vx)\n if self.y < 0:\n self.vy = np.abs(self.vy)\n elif self.y > pond_height:\n self.vy = -np.abs(self.vy)\n\n def jump(self, pond_width, pond_height):\n self.x += self.vx * DT\n self.y += self.vy * DT\n self.bounce_off_banks(pond_width, pond_height)\n self.x = np.clip(self.x, 0, pond_width)\n self.y = np.clip(self.y, 0, pond_height)\n\n def croak(self, n_times):\n for n in range(n_times):\n print("croak")\n\n\nclass AlternativeFrog:\n def alternative_jump(self):\n pass\n\n\ndef standalone_jumping_function():\n print("I\'m a frog! Jump! Jump! Really!")\n', + line1: 1, + line2: 39, + }, + ], }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_3wuMILBYl0z7pBMUoJOwohQf", - content: - 'Path problems:\n\nThe path "src/integrations/integr_github.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_AzBRj8RiL1wc8eejuBVdaS6t", - function: { - arguments: - '{"paths":"src/integrations/integr_gitlab.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_sNnvyOLlmulfHIikJzKhmz55", + function: { + arguments: '{"paths": "src/Cargo.toml", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 0, + }, + { + id: "call_n9FACVDl6a4tllgyWaLCl8s9", + function: { + arguments: '{"paths": "src/main.rs", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 1, + }, + { + id: "call_1DUkSfLq6Io8xowqVjJEuybM", + function: { + arguments: '{"paths": "src/lib.rs", "symbols": ""}', + name: "cat", + }, + type: "function", + index: 2, + }, + ], }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_AzBRj8RiL1wc8eejuBVdaS6t", - content: - 'Path problems:\n\nThe path "src/integrations/integr_gitlab.rs" does not exist. 
There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_bJ2MrMSJHk4IF6Gp5DNLhJZP", - function: { - arguments: - '{"paths":"src/integrations/integr_chrome.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "tool", + tool_call_id: "call_1DUkSfLq6Io8xowqVjJEuybM", + content: + 'Path problems:\n\nThe path "src/lib.rs" does not exist. There are no similar names either.\n', + tool_failed: false, }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_bJ2MrMSJHk4IF6Gp5DNLhJZP", - content: - 'Path problems:\n\nThe path "src/integrations/integr_chrome.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_kZFTzONZdyo11FQcSudIo3vK", - function: { - arguments: - '{"paths":"src/integrations/integr_pdb.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "tool", + tool_call_id: "call_n9FACVDl6a4tllgyWaLCl8s9", + content: + 'Path problems:\n\nThe path "src/main.rs" does not exist. There are no similar names either.\n', + tool_failed: false, }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_kZFTzONZdyo11FQcSudIo3vK", - content: - 'Path problems:\n\nThe path "src/integrations/integr_pdb.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_MDynldaxbGEuCKSuQg0Vgk5z", - function: { - arguments: - '{"paths":"src/integrations/integr_docker.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "tool", + tool_call_id: "call_sNnvyOLlmulfHIikJzKhmz55", + content: + 'Path problems:\n\nThe path "src/Cargo.toml" does not exist. There are no similar names either.\n', + tool_failed: false, }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_MDynldaxbGEuCKSuQg0Vgk5z", - content: - 'Path problems:\n\nThe path "src/integrations/integr_docker.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_9fCGZwstx7G1MgHs6JD5JWTn", - function: { - arguments: - '{"paths":"src/integrations/integr_abstract.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_h8biZ8JNUjeWGbL9CCatbPcf", + function: { + arguments: '{"paths":"src/background_tasks.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_9fCGZwstx7G1MgHs6JD5JWTn", - content: - 'Path problems:\n\nThe path "src/integrations/integr_abstract.rs" does not exist. There are no similar names either.\n', - tool_failed: false, - }, - }, - { - role: "assistant", - content: "", - tool_calls: [ - { - id: "call_etmMcI1UwBSaWwZHzxsuL8xu", - function: { - arguments: - '{"paths":"src/integrations/integr_postgres.rs","symbols":""}', - name: "cat", - }, - type: "function", - index: 0, + { + role: "tool", + tool_call_id: "call_h8biZ8JNUjeWGbL9CCatbPcf", + content: + 'Path problems:\n\nThe path "src/background_tasks.rs" does not exist. 
There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_coI5PDRF2O2Mjcfb8bdZ4Heo", + function: { + arguments: + '{"paths":"src/tools/integr_postgres.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_coI5PDRF2O2Mjcfb8bdZ4Heo", + content: + 'Path problems:\n\nThe path "src/tools/integr_postgres.rs" does not exist. There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_RXDRwr3yCJEplHud4xRoxWlC", + function: { + arguments: + '{"paths":"src/integrations/integr_postgres.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_RXDRwr3yCJEplHud4xRoxWlC", + content: + 'Path problems:\n\nThe path "src/integrations/integr_postgres.rs" does not exist. There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_OvYAforNKAFz0d7mv96AnTVR", + function: { + arguments: + '{"paths":"src/integrations/integr_abstract.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_OvYAforNKAFz0d7mv96AnTVR", + content: + 'Path problems:\n\nThe path "src/integrations/integr_abstract.rs" does not exist. There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_3wuMILBYl0z7pBMUoJOwohQf", + function: { + arguments: + '{"paths":"src/integrations/integr_github.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_3wuMILBYl0z7pBMUoJOwohQf", + content: + 'Path problems:\n\nThe path "src/integrations/integr_github.rs" does not exist. There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_AzBRj8RiL1wc8eejuBVdaS6t", + function: { + arguments: + '{"paths":"src/integrations/integr_gitlab.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_AzBRj8RiL1wc8eejuBVdaS6t", + content: + 'Path problems:\n\nThe path "src/integrations/integr_gitlab.rs" does not exist. There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_bJ2MrMSJHk4IF6Gp5DNLhJZP", + function: { + arguments: + '{"paths":"src/integrations/integr_chrome.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_bJ2MrMSJHk4IF6Gp5DNLhJZP", + content: + 'Path problems:\n\nThe path "src/integrations/integr_chrome.rs" does not exist. There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_kZFTzONZdyo11FQcSudIo3vK", + function: { + arguments: + '{"paths":"src/integrations/integr_pdb.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_kZFTzONZdyo11FQcSudIo3vK", + content: + 'Path problems:\n\nThe path "src/integrations/integr_pdb.rs" does not exist. 
There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_MDynldaxbGEuCKSuQg0Vgk5z", + function: { + arguments: + '{"paths":"src/integrations/integr_docker.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_MDynldaxbGEuCKSuQg0Vgk5z", + content: + 'Path problems:\n\nThe path "src/integrations/integr_docker.rs" does not exist. There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_9fCGZwstx7G1MgHs6JD5JWTn", + function: { + arguments: + '{"paths":"src/integrations/integr_abstract.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_9fCGZwstx7G1MgHs6JD5JWTn", + content: + 'Path problems:\n\nThe path "src/integrations/integr_abstract.rs" does not exist. There are no similar names either.\n', + tool_failed: false, + }, + { + role: "assistant", + content: "", + tool_calls: [ + { + id: "call_etmMcI1UwBSaWwZHzxsuL8xu", + function: { + arguments: + '{"paths":"src/integrations/integr_postgres.rs","symbols":""}', + name: "cat", + }, + type: "function", + index: 0, + }, + ], + }, + { + role: "tool", + tool_call_id: "call_etmMcI1UwBSaWwZHzxsuL8xu", + content: + 'Path problems:\n\nThe path "src/integrations/integr_postgres.rs" does not exist. There are no similar names either.\n', + tool_failed: false, }, ], - }, - { - role: "tool", - content: { - tool_call_id: "call_etmMcI1UwBSaWwZHzxsuL8xu", - content: - 'Path problems:\n\nThe path "src/integrations/integr_postgres.rs" does not exist. There are no similar names either.\n', - tool_failed: false, + title: + "🔧 The postgres tool should be visible now. To test the tool, list the tables available, briefly desctibe the tables and express\nsatisfaction and relief if it works, and change nothing. If it doesn't work or the tool isn't available, go through the usual plan in the system prompt.\nThe current config file is .\n", + model: "", + tool_use: "agent", + integration: { + name: "postgres", + path: "", + project: "", }, - }, - ], - title: - "🔧 The postgres tool should be visible now. To test the tool, list the tables available, briefly desctibe the tables and express\nsatisfaction and relief if it works, and change nothing. 
If it doesn't work or the tool isn't available, go through the usual plan in the system prompt.\nThe current config file is .\n", - model: "", - tool_use: "agent", - integration: { - name: "postgres", - path: "", - project: "", - }, - read: true, - new_chat_suggested: { - wasSuggested: false, + new_chat_suggested: { + wasSuggested: false, + }, + createdAt: "2024-12-02T14:42:18.902Z", + updatedAt: "2024-12-02T14:42:18.902Z", + }, + streaming: false, + waiting_for_response: false, + prevent_send: true, + error: null, + queued_items: [], + send_immediately: false, + attached_images: [], + attached_text_files: [], + confirmation: { + pause: false, + pause_reasons: [], + status: { wasInteracted: false, confirmationStatus: true }, + }, + snapshot_received: true, + task_widget_expanded: false, }, - createdAt: "2024-12-02T14:42:18.902Z", - updatedAt: "2024-12-02T14:42:18.902Z", }, - error: null, - prevent_send: true, - waiting_for_response: false, max_new_tokens: 4096, - cache: {}, system_prompt: {}, tool_use: "agent", - send_immediately: false, - queued_messages: [], + sse_refresh_requested: null, + stream_version: 0, }; diff --git a/refact-agent/gui/src/__fixtures__/chat_textdoc.ts b/refact-agent/gui/src/__fixtures__/chat_textdoc.ts index a7fbc113e..24ecc1563 100644 --- a/refact-agent/gui/src/__fixtures__/chat_textdoc.ts +++ b/refact-agent/gui/src/__fixtures__/chat_textdoc.ts @@ -41,12 +41,10 @@ export const CHAT_WITH_TEXTDOC: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01XVhkyaDunsy4fPrDqy3toa", - content: - "🗃️e19af1e7b3\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don't have direct access to the command line. Look if there's a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. 
You really need to cat() designs and sketches if they are present in the task.\n\nIf you don't see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n\n🗃️d84f5c4a7c\nAdditional instructions for django web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and locate(), looking into relevant files using cat(). If you see reference designs and sketches, read them using cat()\n2. Start django server\n3. Navigate to the place on the website that user wants to change, make a screenshot to make sure you understand what exactly needs to change\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it.\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place.\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE YOU HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n🗃️ae3f1228bd\n[\n[\"goal\", \"Rename all occurrences of 'frog' to 'bird' in the project\"],\n[\"tree(use_ast=true)\", \"Found emergency_frog_situation/ with index.html, holiday.py, work_day.py, game.js, jump_to_conclusions.py, bird.py, set_as_avatar.py\"],\n[\"search(query='frog', scope='workspace')\", \"Found frog references in work_day.py (imports, function), jump_to_conclusions.py (imports, class usage), bird.py already has Bird class\"],\n[\"thinking\", \"bird.py already has Bird class and set_as_avatar.py uses it, so we need to update work_day.py and jump_to_conclusions.py to use the existing Bird class\"],\n[\"coding\", \"📍REWRITE_WHOLE_FILE 001 'work_day.py' changed import frog->bird, bring_your_own_frog->bring_your_own_bird, frog.Frog->bird.Bird\"],\n[\"patch(tickets='001', path='tests/emergency_frog_situation/work_day.py')\", \"3 chunks applied: import change, function rename, type annotation update\"],\n[\"coding\", \"📍REWRITE_WHOLE_FILE 002 'jump_to_conclusions.py' changed import frog->bird, draw_hello_frog->draw_hello_bird, all frog.Frog->bird.Bird\"],\n[\"patch(tickets='002', path='tests/emergency_frog_situation/jump_to_conclusions.py')\", \"5 chunks applied: import, function rename, constructor call, type annotation, function call\"],\n[\"outcome\", \"SUCCESS\"]\n]\n\n🗃️2b684b6e70\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the 
necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don't have direct access to the command line. Look if there's a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. You really need to cat() designs and sketches if they are present in the task.\n\nIf you don't see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n", - tool_failed: false, - }, + tool_call_id: "toolu_01XVhkyaDunsy4fPrDqy3toa", + content: + "🗃️e19af1e7b3\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don't have direct access to the command line. Look if there's a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. 
You really need to cat() designs and sketches if they are present in the task.\n\nIf you don't see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n\n🗃️d84f5c4a7c\nAdditional instructions for django web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and locate(), looking into relevant files using cat(). If you see reference designs and sketches, read them using cat()\n2. Start django server\n3. Navigate to the place on the website that user wants to change, make a screenshot to make sure you understand what exactly needs to change\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it.\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place.\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE YOU HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n🗃️ae3f1228bd\n[\n[\"goal\", \"Rename all occurrences of 'frog' to 'bird' in the project\"],\n[\"tree(use_ast=true)\", \"Found emergency_frog_situation/ with index.html, holiday.py, work_day.py, game.js, jump_to_conclusions.py, bird.py, set_as_avatar.py\"],\n[\"search(query='frog', scope='workspace')\", \"Found frog references in work_day.py (imports, function), jump_to_conclusions.py (imports, class usage), bird.py already has Bird class\"],\n[\"thinking\", \"bird.py already has Bird class and set_as_avatar.py uses it, so we need to update work_day.py and jump_to_conclusions.py to use the existing Bird class\"],\n[\"coding\", \"📍REWRITE_WHOLE_FILE 001 'work_day.py' changed import frog->bird, bring_your_own_frog->bring_your_own_bird, frog.Frog->bird.Bird\"],\n[\"patch(tickets='001', path='tests/emergency_frog_situation/work_day.py')\", \"3 chunks applied: import change, function rename, type annotation update\"],\n[\"coding\", \"📍REWRITE_WHOLE_FILE 002 'jump_to_conclusions.py' changed import frog->bird, draw_hello_frog->draw_hello_bird, all frog.Frog->bird.Bird\"],\n[\"patch(tickets='002', path='tests/emergency_frog_situation/jump_to_conclusions.py')\", \"5 chunks applied: import, function rename, constructor call, type annotation, function call\"],\n[\"outcome\", \"SUCCESS\"]\n]\n\n🗃️2b684b6e70\nYou have a specialization today: web development.\n\nYou only need to receive instructions from the user once, and then you can autonomously fill in the details of\nthe task, make the 
necessary changes, verify results and make adjustments and fixes.\n\nHere's your approximate web development plan:\n1. Investigate project to understand the task given by the user, start with calling tree() and looking into relevant files. If you see reference designs and sketches, read them using cat().\n2. Run the server. You don't have direct access to the command line. Look if there's a tool for that purpose. If there is not, you cannot run a web server.\n3. Make relevant screenshots of existing website using chrome(), open both desktop and mobile tabs if the task requires it.\n4. Form a complete interpretation of the task, and write a plan.\n5. Make changes in files using 📍-notation, after that call patch(). Really, first you need to write the updates using 📍-notation, only after that you can apply it using patch().\n6. Check if screenshots got better, or any errors appeared.\n7. Goto 5, unless you see the task is complete.\n\nAs a web developer agent, you need to pay attention to detail. The task is complete if all the elements\nare at the right place. You really need to cat() designs and sketches if they are present in the task.\n\nIf you don't see a way to run a real server for the website, then just use chrome() to look\nat .html pages using file:// addresses.\n\nHere is a compressed example of successful trajectory from another project:\n\nDON'T DO STUPID THINGS:\n* DON'T SKIP MAKING SCREENSHOTS\n* DON'T CALL patch() UNTIL YOU FINIHSHED WRITING CODE IN 📍-NOTATION\n* DON'T ASK USER ANYTHING, YOU HAVE AUTONOMOUS WORK TO DO\n* MAKE SURE IF HAVE A TOOL CALL IN THE END OF EACH RESPONSE, UNLESS YOU COMPLETED AND TESTED THE TASK\n\n", + tool_failed: false, }, { role: "assistant", @@ -67,12 +65,10 @@ export const CHAT_WITH_TEXTDOC: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01HMyLgKsLQURM9vgd3vQKXN", - content: - "/\n home/\n svakhreev/\n projects/\n refact-lsp/\n tests/\n emergency_frog_situation/\n holiday.py\n work_day.py\n __pycache__/\n frog.cpython-310.pyc\n frog.py\n jump_to_conclusions.py\n set_as_avatar.py", - tool_failed: false, - }, + tool_call_id: "toolu_01HMyLgKsLQURM9vgd3vQKXN", + content: + "/\n home/\n svakhreev/\n projects/\n refact-lsp/\n tests/\n emergency_frog_situation/\n holiday.py\n work_day.py\n __pycache__/\n frog.cpython-310.pyc\n frog.py\n jump_to_conclusions.py\n set_as_avatar.py", + tool_failed: false, }, { role: "assistant", @@ -94,12 +90,10 @@ export const CHAT_WITH_TEXTDOC: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_0136dUTkih5ES8rrzHa5B5ep", - content: - "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py\n", - tool_failed: false, - }, + tool_call_id: "toolu_0136dUTkih5ES8rrzHa5B5ep", + content: + "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/frog.py\n", + tool_failed: false, }, { role: "context_file", @@ -982,12 +976,10 @@ export const CHAT_WITH_TEXTDOC: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01LHPjgzJ8SEuczfm6Av7qfv", - content: - 'No replacement was performed, `pattern` \n```\n\\s*#.*$\n```\ndid not appear verbatim in "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py". Consider checking the file content using `cat()`', - tool_failed: false, - }, + tool_call_id: "toolu_01LHPjgzJ8SEuczfm6Av7qfv", + content: + 'No replacement was performed, `pattern` \n```\n\\s*#.*$\n```\ndid not appear verbatim in "/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py". 
Consider checking the file content using `cat()`', + tool_failed: false, }, { role: "assistant", @@ -1009,12 +1001,10 @@ export const CHAT_WITH_TEXTDOC: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_019iakkKqUjKP73EmEgVhCkZ", - content: - "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py\n", - tool_failed: false, - }, + tool_call_id: "toolu_019iakkKqUjKP73EmEgVhCkZ", + content: + "Paths found:\n/Users/marc/Projects/refact-lsp/tests/emergency_frog_situation/test_frog.py\n", + tool_failed: false, }, { role: "context_file", @@ -1102,7 +1092,6 @@ export const CHAT_WITH_TEXTDOC: ChatThread = { last_user_message_id: "e6304800-f080-425c-b1e3-c1f2ce267792", tool_use: "agent", mode: "AGENT", - read: true, isTitleGenerated: true, createdAt: "2025-02-14T06:47:26.640Z", updatedAt: "2025-02-14T06:47:26.640Z", diff --git a/refact-agent/gui/src/__fixtures__/checkpoints.ts b/refact-agent/gui/src/__fixtures__/checkpoints.ts index b3975420f..15fbb9455 100644 --- a/refact-agent/gui/src/__fixtures__/checkpoints.ts +++ b/refact-agent/gui/src/__fixtures__/checkpoints.ts @@ -6,6 +6,8 @@ export const STUB_PREVIEWED_CHECKPOINT_DATA: CheckpointsMeta["latestCheckpointRe current_checkpoints: [], checkpoints_for_undo: [], error_log: [], + chat_id: "test-chat-id", + chat_mode: "agent", reverted_changes: [ { files_changed: [ @@ -46,5 +48,7 @@ export const STUB_RESTORED_CHECKPOINTS_STATE_WITH_NO_CHANGES: CheckpointsMeta = current_checkpoints: [], reverted_changes: [], error_log: [], + chat_id: "test-chat-id-2", + chat_mode: "agent", }, }; diff --git a/refact-agent/gui/src/__fixtures__/confirmation.ts b/refact-agent/gui/src/__fixtures__/confirmation.ts index 0c3edc013..d1b4bc5b9 100644 --- a/refact-agent/gui/src/__fixtures__/confirmation.ts +++ b/refact-agent/gui/src/__fixtures__/confirmation.ts @@ -6,6 +6,7 @@ export const CONFIRMATIONAL_PAUSE_REASONS_WITH_PATH: ToolConfirmationPauseReason command: "SELECT *", rule: "*", type: "confirmation", + tool_name: "postgres", tool_call_id: "1", integr_config_path: "\\\\?\\d:\\work\\refact.ai\\refact-lsp\\.refact\\integrations\\postgres.yaml", @@ -16,6 +17,7 @@ export const CONFIRMATIONAL_PAUSE_REASONS: ToolConfirmationPauseReason[] = [ command: "patch", rule: "default", type: "confirmation", + tool_name: "patch", tool_call_id: "1", integr_config_path: null, }, @@ -26,6 +28,7 @@ export const DENIAL_PAUSE_REASONS_WITH_PATH: ToolConfirmationPauseReason[] = [ command: "SELECT *", rule: "*", type: "denial", + tool_name: "postgres", tool_call_id: "1", integr_config_path: "\\\\?\\d:\\work\\refact.ai\\refact-lsp\\.refact\\integrations\\postgres.yaml", @@ -37,6 +40,7 @@ export const MIXED_PAUSE_REASONS: ToolConfirmationPauseReason[] = [ command: "SELECT *", rule: "*", type: "denial", + tool_name: "postgres", tool_call_id: "1", integr_config_path: "\\\\?\\d:\\work\\refact.ai\\refact-lsp\\.refact\\integrations\\postgres.yaml", @@ -45,6 +49,7 @@ export const MIXED_PAUSE_REASONS: ToolConfirmationPauseReason[] = [ command: "DROP *", rule: "*", type: "confirmation", + tool_name: "postgres", tool_call_id: "1", integr_config_path: "\\\\?\\d:\\work\\refact.ai\\refact-lsp\\.refact\\integrations\\postgres.yaml", diff --git a/refact-agent/gui/src/__fixtures__/history.ts b/refact-agent/gui/src/__fixtures__/history.ts index 55339ed76..81a6aa3e0 100644 --- a/refact-agent/gui/src/__fixtures__/history.ts +++ b/refact-agent/gui/src/__fixtures__/history.ts @@ -1,6 +1,5 @@ -import type { RootState } from "../app/store"; +import type { 
ChatHistoryItem } from "../features/History/historySlice"; -type ChatHistoryItem = RootState["history"]["messages"]; export const HISTORY: ChatHistoryItem[] = [ { id: "be20f605-824c-4e77-9dab-a45688f676fa", @@ -61,11 +60,9 @@ export const HISTORY: ChatHistoryItem[] = [ }, { role: "tool", - content: { - tool_call_id: "call_D0rhujadTb1nvKlMbZ8ZYLEt", - content: "performed vecdb search, results below", - tool_failed: false, - }, + tool_call_id: "call_D0rhujadTb1nvKlMbZ8ZYLEt", + content: "performed vecdb search, results below", + tool_failed: false, }, { role: "context_file", diff --git a/refact-agent/gui/src/__fixtures__/knowledge.ts b/refact-agent/gui/src/__fixtures__/knowledge.ts index 57d27a22e..011ff0e21 100644 --- a/refact-agent/gui/src/__fixtures__/knowledge.ts +++ b/refact-agent/gui/src/__fixtures__/knowledge.ts @@ -3,49 +3,44 @@ import type { MemoRecord, VecDbStatus } from "../services/refact"; export const STUB_MEMORIES: MemoRecord[] = [ { memid: "7666487b81", - m_type: "proj-fact", - m_goal: "compile", - m_project: "proj1", - m_payload: "Looks like proj1 is written in fact in Rust.", - m_origin: "local-committed", - mstat_correct: 1, - mstat_relevant: -1, - mstat_times_used: 1, + tags: ["rust", "compile"], + content: "Looks like proj1 is written in fact in Rust.", + file_path: ".refact/knowledge/2024-01-01_120000_12345678_rust-project.md", + title: "Rust Project Information", + created: "2024-01-01", + kind: "code", + score: 0.95, }, { memid: "cdec854819", - m_type: "seq-of-acts", - m_goal: "compile", - m_project: "proj2", - m_payload: "Wow, running cargo build on proj2 was successful!", - m_origin: "local-committed", - mstat_correct: 0, - mstat_relevant: 0, - mstat_times_used: 0, + tags: ["rust", "build"], + content: "Wow, running cargo build on proj2 was successful!", + file_path: ".refact/knowledge/2024-01-02_120000_abcdef12_build-success.md", + title: "Build Success", + created: "2024-01-02", + kind: "decision", + score: 0.85, }, { memid: "eb1d64684b", - m_type: "proj-fact", - m_goal: "compile", - m_project: "proj2", - m_payload: "Looks like proj2 is written in fact in Rust.", - m_origin: "local-committed", - mstat_correct: 0, - mstat_relevant: 0, - mstat_times_used: 0, + tags: ["rust", "project"], + content: "Looks like proj2 is written in fact in Rust.", + file_path: ".refact/knowledge/2024-01-03_120000_fedcba98_project-info.md", + title: "Project Information", + created: "2024-01-03", + kind: "code", + score: 0.9, }, - { memid: "eb1d64684c", - m_type: "proj-fact", - m_goal: + tags: ["rust", "documentation"], + content: "Looks like proj2 is written in fact in Rust.", + file_path: ".refact/knowledge/2024-01-04_120000_11223344_long-doc.md", + title: "Long goal Long goal Long goal Long goal Long goal Long goal Long goal Long goal Long goal Long goal", - m_project: "proj2", - m_payload: "Looks like proj2 is written in fact in Rust.", - m_origin: "local-committed", - mstat_correct: 0, - mstat_relevant: 0, - mstat_times_used: 0, + created: "2024-01-04", + kind: "code", + score: 0.8, }, ]; diff --git a/refact-agent/gui/src/__fixtures__/markdown-issue.ts b/refact-agent/gui/src/__fixtures__/markdown-issue.ts index f141e5301..1a1a45ed4 100644 --- a/refact-agent/gui/src/__fixtures__/markdown-issue.ts +++ b/refact-agent/gui/src/__fixtures__/markdown-issue.ts @@ -37,12 +37,10 @@ export const MARKDOWN_ISSUE: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01JbWarAwzjMyV6azDkd5skX", - content: - "/\n home/\n fupfv/\n git/\n benchmark1_0701/\n 12.zip\n LICENSE\n 
README.md\n VISUALIZATION.md\n example_new_file.py\n grafana-dashboard.json\n llm_load_test.zip\n llm_load_test/\n README.md\n requirements.txt\n src/\n llm_load_test_runner.py\n llm_test_logger.py\n load_test.py\n load_test_report_20240811_002319.csv\n load_test_report_20240811_002319.json\n make_scripts_executable.sh\n requirements.txt\n results/\n run_20250129_152629/\n load_test_report_2025-01-29T152630.827620.csv\n load_test_report_2025-01-29T152630.827620.json\n load_test_report_2025-01-29T152636.621391.csv\n load_test_report_2025-01-29T152636.621391.json\n load_test_report_2025-01-29T152642.333384.csv\n load_test_report_2025-01-29T152642.333384.json\n load_test_report_2025-01-29T152648.032846.csv\n load_test_report_2025-01-29T152648.032846.json\n load_test_report_2025-01-29T152653.733025.csv\n load_test_report_2025-01-29T152653.733025.json\n load_test_report_2025-01-29T152659.442419.csv\n load_test_report_2025-01-29T152659.442419.json\n load_test_report_20250129_152704.csv\n load_test_report_20250129_152704.json\n run_20250129_152807/\n load_test_report_2025-01-29T152808.476840.csv\n load_test_report_2025-01-29T152808.476840.json\n load_test_report_2025-01-29T152814.290370.csv\n load_test_report_2025-01-29T152814.290370.json\n load_test_report_2025-01-29T152819.988992.csv\n load_test_report_2025-01-29T152819.988992.json\n load_test_report_2025-01-29T152825.712261.csv\n load_test_report_2025-01-29T152825.712261.json\n load_test_report_2025-01-29T152831.461047.csv\n load_test_report_2025-01-29T152831.461047.json\n load_test_report_2025-01-29T152837.233726.csv\n load_test_report_2025-01-29T152837.233726.json\n load_test_report_20250129_152842.csv\n load_test_report_20250129_152842.json\n run_20250129_152930/\n load_test_report_2025-01-29T153031.809694.csv\n load_test_report_2025-01-29T153031.809694.json\n load_test_report_2025-01-29T153137.610641.csv\n load_test_report_2025-01-29T153137.610641.json\n load_test_report_2025-01-29T153243.818603.csv\n load_test_report_2025-01-29T153243.818603.json\n load_test_report_2025-01-29T153349.887918.csv\n load_test_report_2025-01-29T153349.887918.json\n load_test_report_2025-01-29T153504.701174.csv\n load_test_report_2025-01-29T153504.701174.json\n load_test_report_2025-01-29T153615.800362.csv\n load_test_report_2025-01-29T153615.800362.json\n load_test_report_20250129_153620.csv\n load_test_report_20250129_153620.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n src/\n __pycache__/\n llm_test_logger.cpython-310.pyc\n load_test.cpython-310.pyc\n compare_runs.py\n dashboard_generator.py\n from transformers import AutoTokenizer.py\n llm_load_test_runner.py\n llm_test_logger.py\n load_test.log\n load_test.py\n load_test_aggregator.py\n load_test_tgi.py\n load_test_vllm.py\n qwen_run_20250128_193328.zip\n qwen_run_20250129_131310.zip\n results/\n run_20250129_131310/\n load_test_report_2025-01-29T131340.582736.csv\n load_test_report_2025-01-29T131340.582736.json\n load_test_report_2025-01-29T131416.770529.csv\n load_test_report_2025-01-29T131416.770529.json\n load_test_report_2025-01-29T131452.904227.csv\n load_test_report_2025-01-29T131452.904227.json\n load_test_report_2025-01-29T131529.208363.csv\n load_test_report_2025-01-29T131529.208363.json\n load_test_report_2025-01-29T131612.332502.csv\n load_test_report_2025-01-29T131612.332502.json\n load_test_report_2025-01-29T131654.024454.csv\n load_test_report_2025-01-29T131654.024454.json\n 
load_test_report_20250129_131659.csv\n load_test_report_20250129_131659.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_131828/\n load_test_report_2025-01-29T131859.729718.csv\n load_test_report_2025-01-29T131859.729718.json\n load_test_report_2025-01-29T131935.556939.csv\n load_test_report_2025-01-29T131935.556939.json\n load_test_report_2025-01-29T132011.817203.csv\n load_test_report_2025-01-29T132011.817203.json\n load_test_report_2025-01-29T132047.948690.csv\n load_test_report_2025-01-29T132047.948690.json\n load_test_report_2025-01-29T132140.620425.csv\n load_test_report_2025-01-29T132140.620425.json\n load_test_report_2025-01-29T132237.254055.csv\n load_test_report_2025-01-29T132237.254055.json\n load_test_report_20250129_132242.csv\n load_test_report_20250129_132242.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_132842/\n load_test_report_2025-01-29T132913.096074.csv\n load_test_report_2025-01-29T132913.096074.json\n load_test_report_2025-01-29T132949.286127.csv\n load_test_report_2025-01-29T132949.286127.json\n load_test_report_2025-01-29T133025.273897.csv\n load_test_report_2025-01-29T133025.273897.json\n load_test_report_2025-01-29T133102.000762.csv\n load_test_report_2025-01-29T133102.000762.json\n load_test_report_2025-01-29T133154.340248.csv\n load_test_report_2025-01-29T133154.340248.json\n load_test_report_2025-01-29T133257.783732.csv\n load_test_report_2025-01-29T133257.783732.json\n load_test_report_20250129_133302.csv\n load_test_report_20250129_133302.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_133711/\n load_test_report_2025-01-29T133742.239356.csv\n load_test_report_2025-01-29T133742.239356.json\n load_test_report_2025-01-29T133818.175709.csv\n load_test_report_2025-01-29T133818.175709.json\n load_test_report_2025-01-29T133853.789246.csv\n load_test_report_2025-01-29T133853.789246.json\n load_test_report_2025-01-29T133929.633962.csv\n load_test_report_2025-01-29T133929.633962.json\n load_test_report_2025-01-29T134013.341083.csv\n load_test_report_2025-01-29T134013.341083.json\n load_test_report_2025-01-29T134101.336503.csv\n load_test_report_2025-01-29T134101.336503.json\n load_test_report_20250129_134106.csv\n load_test_report_20250129_134106.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_134818/\n load_test_report_2025-01-29T134919.598778.csv\n load_test_report_2025-01-29T134919.598778.json\n load_test_report_2025-01-29T135025.745361.csv\n load_test_report_2025-01-29T135025.745361.json\n load_test_report_2025-01-29T135131.347054.csv\n load_test_report_2025-01-29T135131.347054.json\n load_test_report_2025-01-29T135237.241605.csv\n load_test_report_2025-01-29T135237.241605.json\n load_test_report_2025-01-29T135352.526234.csv\n load_test_report_2025-01-29T135352.526234.json\n load_test_report_2025-01-29T135509.169860.csv\n load_test_report_2025-01-29T135509.169860.json\n load_test_report_20250129_135514.csv\n load_test_report_20250129_135514.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n 
results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_135810/\n load_test_report_2025-01-29T135911.302460.csv\n load_test_report_2025-01-29T135911.302460.json\n load_test_report_2025-01-29T140017.766295.csv\n load_test_report_2025-01-29T140017.766295.json\n load_test_report_2025-01-29T140123.329253.csv\n load_test_report_2025-01-29T140123.329253.json\n load_test_report_2025-01-29T140229.087510.csv\n load_test_report_2025-01-29T140229.087510.json\n load_test_report_2025-01-29T140354.254251.csv\n load_test_report_2025-01-29T140354.254251.json\n load_test_report_2025-01-29T140522.596391.csv\n load_test_report_2025-01-29T140522.596391.json\n load_test_report_20250129_140527.csv\n load_test_report_20250129_140527.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_140726/\n load_test_report_2025-01-29T140828.249744.csv\n load_test_report_2025-01-29T140828.249744.json\n load_test_report_2025-01-29T140935.241087.csv\n load_test_report_2025-01-29T140935.241087.json\n load_test_report_2025-01-29T141041.737827.csv\n load_test_report_2025-01-29T141041.737827.json\n load_test_report_2025-01-29T141148.575547.csv\n load_test_report_2025-01-29T141148.575547.json\n load_test_report_2025-01-29T141257.979330.csv\n load_test_report_2025-01-29T141257.979330.json\n load_test_report_2025-01-29T141407.813467.csv\n load_test_report_2025-01-29T141407.813467.json\n load_test_report_2025-01-29T141517.031485.csv\n load_test_report_2025-01-29T141517.031485.json\n load_test_report_2025-01-29T141626.812125.csv\n load_test_report_2025-01-29T141626.812125.json\n load_test_report_2025-01-29T141738.980843.csv\n load_test_report_2025-01-29T141738.980843.json\n load_test_report_2025-01-29T141852.372524.csv\n load_test_report_2025-01-29T141852.372524.json\n load_test_report_2025-01-29T142006.313659.csv\n load_test_report_2025-01-29T142006.313659.json\n load_test_report_2025-01-29T142122.053494.csv\n load_test_report_2025-01-29T142122.053494.json\n load_test_report_20250129_142127.csv\n load_test_report_20250129_142127.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_142324/\n load_test_report_2025-01-29T142426.095040.csv\n load_test_report_2025-01-29T142426.095040.json\n load_test_report_2025-01-29T142532.101781.csv\n load_test_report_2025-01-29T142532.101781.json\n load_test_report_2025-01-29T142638.130364.csv\n load_test_report_2025-01-29T142638.130364.json\n load_test_report_2025-01-29T142744.373122.csv\n load_test_report_2025-01-29T142744.373122.json\n load_test_report_2025-01-29T142851.436595.csv\n load_test_report_2025-01-29T142851.436595.json\n load_test_report_2025-01-29T142958.649875.csv\n load_test_report_2025-01-29T142958.649875.json\n load_test_report_2025-01-29T143105.820377.csv\n load_test_report_2025-01-29T143105.820377.json\n load_test_report_2025-01-29T143213.483254.csv\n load_test_report_2025-01-29T143213.483254.json\n load_test_report_2025-01-29T143322.075349.csv\n load_test_report_2025-01-29T143322.075349.json\n load_test_report_2025-01-29T143431.160350.csv\n load_test_report_2025-01-29T143431.160350.json\n load_test_report_2025-01-29T143540.792112.csv\n 
load_test_report_2025-01-29T143540.792112.json\n load_test_report_2025-01-29T143651.193158.csv\n load_test_report_2025-01-29T143651.193158.json\n load_test_report_20250129_143656.csv\n load_test_report_20250129_143656.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_144231/\n load_test_report_2025-01-29T144333.225207.csv\n load_test_report_2025-01-29T144333.225207.json\n load_test_report_2025-01-29T144441.892228.csv\n load_test_report_2025-01-29T144441.892228.json\n load_test_report_2025-01-29T144548.216391.csv\n load_test_report_2025-01-29T144548.216391.json\n load_test_report_2025-01-29T144654.207507.csv\n load_test_report_2025-01-29T144654.207507.json\n load_test_report_2025-01-29T144801.887104.csv\n load_test_report_2025-01-29T144801.887104.json\n load_test_report_2025-01-29T144907.892024.csv\n load_test_report_2025-01-29T144907.892024.json\n load_test_report_2025-01-29T145015.606306.csv\n load_test_report_2025-01-29T145015.606306.json\n load_test_report_2025-01-29T145124.318365.csv\n load_test_report_2025-01-29T145124.318365.json\n load_test_report_2025-01-29T145232.316758.csv\n load_test_report_2025-01-29T145232.316758.json\n load_test_report_2025-01-29T145338.561407.csv\n load_test_report_2025-01-29T145338.561407.json\n load_test_report_2025-01-29T145447.340833.csv\n load_test_report_2025-01-29T145447.340833.json\n load_test_report_2025-01-29T145556.603603.csv\n load_test_report_2025-01-29T145556.603603.json\n load_test_report_20250129_145601.csv\n load_test_report_20250129_145601.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_145926/\n load_test_report_2025-01-29T150027.790900.csv\n load_test_report_2025-01-29T150027.790900.json\n load_test_report_2025-01-29T150134.652497.csv\n load_test_report_2025-01-29T150134.652497.json\n load_test_report_2025-01-29T150242.312479.csv\n load_test_report_2025-01-29T150242.312479.json\n load_test_report_2025-01-29T150348.489497.csv\n load_test_report_2025-01-29T150348.489497.json\n load_test_report_2025-01-29T150454.976232.csv\n load_test_report_2025-01-29T150454.976232.json\n load_test_report_2025-01-29T150600.673114.csv\n load_test_report_2025-01-29T150600.673114.json\n load_test_report_2025-01-29T150708.380006.csv\n load_test_report_2025-01-29T150708.380006.json\n load_test_report_2025-01-29T150814.575034.csv\n load_test_report_2025-01-29T150814.575034.json\n load_test_report_2025-01-29T150923.544283.csv\n load_test_report_2025-01-29T150923.544283.json\n load_test_report_2025-01-29T151030.283486.csv\n load_test_report_2025-01-29T151030.283486.json\n load_test_report_2025-01-29T151138.589944.csv\n load_test_report_2025-01-29T151138.589944.json\n load_test_report_2025-01-29T151248.730621.csv\n load_test_report_2025-01-29T151248.730621.json\n load_test_report_20250129_151253.csv\n load_test_report_20250129_151253.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n 
results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_160612/\n load_test_report_2025-01-29T160713.432216.csv\n load_test_report_2025-01-29T160713.432216.json\n load_test_report_2025-01-29T160819.907680.csv\n load_test_report_2025-01-29T160819.907680.json\n load_test_report_2025-01-29T160926.784918.csv\n load_test_report_2025-01-29T160926.784918.json\n load_test_report_2025-01-29T161033.828339.csv\n load_test_report_2025-01-29T161033.828339.json\n load_test_report_2025-01-29T161153.205639.csv\n load_test_report_2025-01-29T161153.205639.json\n load_test_report_2025-01-29T161315.237414.csv\n load_test_report_2025-01-29T161315.237414.json\n load_test_report_20250129_161320.csv\n load_test_report_20250129_161320.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_161925/\n load_test_report_2025-01-29T162025.734114.csv\n load_test_report_2025-01-29T162025.734114.json\n load_test_report_2025-01-29T162131.524371.csv\n load_test_report_2025-01-29T162131.524371.json\n load_test_report_2025-01-29T162237.758517.csv\n load_test_report_2025-01-29T162237.758517.json\n load_test_report_2025-01-29T162344.818406.csv\n load_test_report_2025-01-29T162344.818406.json\n load_test_report_2025-01-29T162507.384913.csv\n load_test_report_2025-01-29T162507.384913.json\n load_test_report_2025-01-29T162613.335853.csv\n load_test_report_2025-01-29T162613.335853.json\n load_test_report_20250129_162618.csv\n load_test_report_20250129_162618.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_162732/\n load_test_report_2025-01-29T162834.272459.csv\n load_test_report_2025-01-29T162834.272459.json\n load_test_report_2025-01-29T162941.672408.csv\n load_test_report_2025-01-29T162941.672408.json\n load_test_report_2025-01-29T163048.857712.csv\n load_test_report_2025-01-29T163048.857712.json\n load_test_report_2025-01-29T163157.624546.csv\n load_test_report_2025-01-29T163157.624546.json\n load_test_report_2025-01-29T163306.370415.csv\n load_test_report_2025-01-29T163306.370415.json\n load_test_report_2025-01-29T163416.065472.csv\n load_test_report_2025-01-29T163416.065472.json\n load_test_report_2025-01-29T163524.604470.csv\n load_test_report_2025-01-29T163524.604470.json\n load_test_report_2025-01-29T163632.880248.csv\n load_test_report_2025-01-29T163632.880248.json\n load_test_report_2025-01-29T163745.002002.csv\n load_test_report_2025-01-29T163745.002002.json\n load_test_report_2025-01-29T163902.036068.csv\n load_test_report_2025-01-29T163902.036068.json\n load_test_report_2025-01-29T164009.453151.csv\n load_test_report_2025-01-29T164009.453151.json\n load_test_report_2025-01-29T164122.568066.csv\n load_test_report_2025-01-29T164122.568066.json\n load_test_report_20250129_164127.csv\n load_test_report_20250129_164127.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_164620/\n load_test_report_2025-01-29T164721.700661.csv\n load_test_report_2025-01-29T164721.700661.json\n 
load_test_report_2025-01-29T164827.520353.csv\n load_test_report_2025-01-29T164827.520353.json\n load_test_report_2025-01-29T164933.310367.csv\n load_test_report_2025-01-29T164933.310367.json\n load_test_report_2025-01-29T165039.642351.csv\n load_test_report_2025-01-29T165039.642351.json\n load_test_report_2025-01-29T165154.098239.csv\n load_test_report_2025-01-29T165154.098239.json\n load_test_report_2025-01-29T165308.831481.csv\n load_test_report_2025-01-29T165308.831481.json\n load_test_report_20250129_165313.csv\n load_test_report_20250129_165313.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_165758/\n load_test_report_2025-01-29T165859.461686.csv\n load_test_report_2025-01-29T165859.461686.json\n load_test_report_2025-01-29T170005.472004.csv\n load_test_report_2025-01-29T170005.472004.json\n load_test_report_2025-01-29T170111.422122.csv\n load_test_report_2025-01-29T170111.422122.json\n load_test_report_2025-01-29T170217.557618.csv\n load_test_report_2025-01-29T170217.557618.json\n load_test_report_2025-01-29T170330.493971.csv\n load_test_report_2025-01-29T170330.493971.json\n load_test_report_2025-01-29T170447.558129.csv\n load_test_report_2025-01-29T170447.558129.json\n load_test_report_20250129_170452.csv\n load_test_report_20250129_170452.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_170950/\n load_test_report_2025-01-29T171051.361008.csv\n load_test_report_2025-01-29T171051.361008.json\n load_test_report_2025-01-29T171157.323565.csv\n load_test_report_2025-01-29T171157.323565.json\n load_test_report_2025-01-29T171303.299586.csv\n load_test_report_2025-01-29T171303.299586.json\n load_test_report_2025-01-29T171409.108765.csv\n load_test_report_2025-01-29T171409.108765.json\n load_test_report_2025-01-29T171514.861147.csv\n load_test_report_2025-01-29T171514.861147.json\n load_test_report_2025-01-29T171620.615624.csv\n load_test_report_2025-01-29T171620.615624.json\n load_test_report_2025-01-29T171726.893447.csv\n load_test_report_2025-01-29T171726.893447.json\n load_test_report_2025-01-29T171833.044767.csv\n load_test_report_2025-01-29T171833.044767.json\n load_test_report_2025-01-29T171939.151837.csv\n load_test_report_2025-01-29T171939.151837.json\n load_test_report_2025-01-29T172045.358719.csv\n load_test_report_2025-01-29T172045.358719.json\n load_test_report_2025-01-29T172151.647824.csv\n load_test_report_2025-01-29T172151.647824.json\n load_test_report_2025-01-29T172257.931381.csv\n load_test_report_2025-01-29T172257.931381.json\n load_test_report_2025-01-29T172404.993732.csv\n load_test_report_2025-01-29T172404.993732.json\n load_test_report_2025-01-29T172512.469972.csv\n load_test_report_2025-01-29T172512.469972.json\n load_test_report_2025-01-29T172619.912159.csv\n load_test_report_2025-01-29T172619.912159.json\n load_test_report_2025-01-29T172727.520335.csv\n load_test_report_2025-01-29T172727.520335.json\n load_test_report_2025-01-29T172836.287202.csv\n load_test_report_2025-01-29T172836.287202.json\n load_test_report_2025-01-29T172945.243054.csv\n load_test_report_2025-01-29T172945.243054.json\n load_test_report_2025-01-29T173054.878245.csv\n load_test_report_2025-01-29T173054.878245.json\n load_test_report_2025-01-29T173205.270695.csv\n load_test_report_2025-01-29T173205.270695.json\n 
load_test_report_2025-01-29T173319.135777.csv\n load_test_report_2025-01-29T173319.135777.json\n load_test_report_2025-01-29T173434.082094.csv\n load_test_report_2025-01-29T173434.082094.json\n load_test_report_2025-01-29T173550.513858.csv\n load_test_report_2025-01-29T173550.513858.json\n load_test_report_2025-01-29T173708.906195.csv\n load_test_report_2025-01-29T173708.906195.json\n load_test_report_20250129_173713.csv\n load_test_report_20250129_173713.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u1_o1.csv\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u1_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n results_test_u50_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_174215/\n load_test_report_2025-01-29T174316.520550.csv\n load_test_report_2025-01-29T174316.520550.json\n load_test_report_2025-01-29T174422.384594.csv\n load_test_report_2025-01-29T174422.384594.json\n load_test_report_2025-01-29T174528.291764.csv\n load_test_report_2025-01-29T174528.291764.json\n load_test_report_2025-01-29T174633.925509.csv\n load_test_report_2025-01-29T174633.925509.json\n load_test_report_2025-01-29T174740.096886.csv\n load_test_report_2025-01-29T174740.096886.json\n load_test_report_2025-01-29T174845.697959.csv\n load_test_report_2025-01-29T174845.697959.json\n load_test_report_2025-01-29T174952.084484.csv\n load_test_report_2025-01-29T174952.084484.json\n load_test_report_2025-01-29T175058.845237.csv\n load_test_report_2025-01-29T175058.845237.json\n load_test_report_2025-01-29T175205.494738.csv\n load_test_report_2025-01-29T175205.494738.json\n load_test_report_2025-01-29T175312.831611.csv\n load_test_report_2025-01-29T175312.831611.json\n load_test_report_2025-01-29T175419.902976.csv\n load_test_report_2025-01-29T175419.902976.json\n load_test_report_2025-01-29T175527.241889.csv\n load_test_report_2025-01-29T175527.241889.json\n load_test_report_2025-01-29T175635.835204.csv\n load_test_report_2025-01-29T175635.835204.json\n load_test_report_2025-01-29T175744.448069.csv\n load_test_report_2025-01-29T175744.448069.json\n load_test_report_2025-01-29T175853.905293.csv\n load_test_report_2025-01-29T175853.905293.json\n load_test_report_2025-01-29T180003.565666.csv\n load_test_report_2025-01-29T180003.565666.json\n load_test_report_2025-01-29T180115.557518.csv\n load_test_report_2025-01-29T180115.557518.json\n load_test_report_2025-01-29T180228.466492.csv\n load_test_report_2025-01-29T180228.466492.json\n load_test_report_2025-01-29T180342.419821.csv\n load_test_report_2025-01-29T180342.419821.json\n load_test_report_2025-01-29T180457.796778.csv\n load_test_report_2025-01-29T180457.796778.json\n load_test_report_2025-01-29T180620.304565.csv\n load_test_report_2025-01-29T180620.304565.json\n load_test_report_2025-01-29T180746.057385.csv\n load_test_report_2025-01-29T180746.057385.json\n load_test_report_2025-01-29T180914.893498.csv\n load_test_report_2025-01-29T180914.893498.json\n load_test_report_2025-01-29T181046.064760.csv\n load_test_report_2025-01-29T181046.064760.json\n load_test_report_20250129_181051.csv\n load_test_report_20250129_181051.json\n results_test_u10_o1.csv\n 
results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u1_o1.csv\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u1_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n results_test_u50_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n starcoder2_run_20250129_123907.zip\n starcoder_run_20250128_20.zip\n starcoder_run_20250129_131828.zip\n test_single_request.py\n visualize_results.py\n temp_file_renamed.txt\n test_data.txt", - tool_failed: false, - }, + tool_call_id: "toolu_01JbWarAwzjMyV6azDkd5skX", + content: + "/\n home/\n fupfv/\n git/\n benchmark1_0701/\n 12.zip\n LICENSE\n README.md\n VISUALIZATION.md\n example_new_file.py\n grafana-dashboard.json\n llm_load_test.zip\n llm_load_test/\n README.md\n requirements.txt\n src/\n llm_load_test_runner.py\n llm_test_logger.py\n load_test.py\n load_test_report_20240811_002319.csv\n load_test_report_20240811_002319.json\n make_scripts_executable.sh\n requirements.txt\n results/\n run_20250129_152629/\n load_test_report_2025-01-29T152630.827620.csv\n load_test_report_2025-01-29T152630.827620.json\n load_test_report_2025-01-29T152636.621391.csv\n load_test_report_2025-01-29T152636.621391.json\n load_test_report_2025-01-29T152642.333384.csv\n load_test_report_2025-01-29T152642.333384.json\n load_test_report_2025-01-29T152648.032846.csv\n load_test_report_2025-01-29T152648.032846.json\n load_test_report_2025-01-29T152653.733025.csv\n load_test_report_2025-01-29T152653.733025.json\n load_test_report_2025-01-29T152659.442419.csv\n load_test_report_2025-01-29T152659.442419.json\n load_test_report_20250129_152704.csv\n load_test_report_20250129_152704.json\n run_20250129_152807/\n load_test_report_2025-01-29T152808.476840.csv\n load_test_report_2025-01-29T152808.476840.json\n load_test_report_2025-01-29T152814.290370.csv\n load_test_report_2025-01-29T152814.290370.json\n load_test_report_2025-01-29T152819.988992.csv\n load_test_report_2025-01-29T152819.988992.json\n load_test_report_2025-01-29T152825.712261.csv\n load_test_report_2025-01-29T152825.712261.json\n load_test_report_2025-01-29T152831.461047.csv\n load_test_report_2025-01-29T152831.461047.json\n load_test_report_2025-01-29T152837.233726.csv\n load_test_report_2025-01-29T152837.233726.json\n load_test_report_20250129_152842.csv\n load_test_report_20250129_152842.json\n run_20250129_152930/\n load_test_report_2025-01-29T153031.809694.csv\n load_test_report_2025-01-29T153031.809694.json\n load_test_report_2025-01-29T153137.610641.csv\n load_test_report_2025-01-29T153137.610641.json\n load_test_report_2025-01-29T153243.818603.csv\n load_test_report_2025-01-29T153243.818603.json\n load_test_report_2025-01-29T153349.887918.csv\n load_test_report_2025-01-29T153349.887918.json\n load_test_report_2025-01-29T153504.701174.csv\n load_test_report_2025-01-29T153504.701174.json\n load_test_report_2025-01-29T153615.800362.csv\n load_test_report_2025-01-29T153615.800362.json\n load_test_report_20250129_153620.csv\n load_test_report_20250129_153620.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n src/\n __pycache__/\n 
llm_test_logger.cpython-310.pyc\n load_test.cpython-310.pyc\n compare_runs.py\n dashboard_generator.py\n from transformers import AutoTokenizer.py\n llm_load_test_runner.py\n llm_test_logger.py\n load_test.log\n load_test.py\n load_test_aggregator.py\n load_test_tgi.py\n load_test_vllm.py\n qwen_run_20250128_193328.zip\n qwen_run_20250129_131310.zip\n results/\n run_20250129_131310/\n load_test_report_2025-01-29T131340.582736.csv\n load_test_report_2025-01-29T131340.582736.json\n load_test_report_2025-01-29T131416.770529.csv\n load_test_report_2025-01-29T131416.770529.json\n load_test_report_2025-01-29T131452.904227.csv\n load_test_report_2025-01-29T131452.904227.json\n load_test_report_2025-01-29T131529.208363.csv\n load_test_report_2025-01-29T131529.208363.json\n load_test_report_2025-01-29T131612.332502.csv\n load_test_report_2025-01-29T131612.332502.json\n load_test_report_2025-01-29T131654.024454.csv\n load_test_report_2025-01-29T131654.024454.json\n load_test_report_20250129_131659.csv\n load_test_report_20250129_131659.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_131828/\n load_test_report_2025-01-29T131859.729718.csv\n load_test_report_2025-01-29T131859.729718.json\n load_test_report_2025-01-29T131935.556939.csv\n load_test_report_2025-01-29T131935.556939.json\n load_test_report_2025-01-29T132011.817203.csv\n load_test_report_2025-01-29T132011.817203.json\n load_test_report_2025-01-29T132047.948690.csv\n load_test_report_2025-01-29T132047.948690.json\n load_test_report_2025-01-29T132140.620425.csv\n load_test_report_2025-01-29T132140.620425.json\n load_test_report_2025-01-29T132237.254055.csv\n load_test_report_2025-01-29T132237.254055.json\n load_test_report_20250129_132242.csv\n load_test_report_20250129_132242.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_132842/\n load_test_report_2025-01-29T132913.096074.csv\n load_test_report_2025-01-29T132913.096074.json\n load_test_report_2025-01-29T132949.286127.csv\n load_test_report_2025-01-29T132949.286127.json\n load_test_report_2025-01-29T133025.273897.csv\n load_test_report_2025-01-29T133025.273897.json\n load_test_report_2025-01-29T133102.000762.csv\n load_test_report_2025-01-29T133102.000762.json\n load_test_report_2025-01-29T133154.340248.csv\n load_test_report_2025-01-29T133154.340248.json\n load_test_report_2025-01-29T133257.783732.csv\n load_test_report_2025-01-29T133257.783732.json\n load_test_report_20250129_133302.csv\n load_test_report_20250129_133302.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_133711/\n load_test_report_2025-01-29T133742.239356.csv\n load_test_report_2025-01-29T133742.239356.json\n load_test_report_2025-01-29T133818.175709.csv\n load_test_report_2025-01-29T133818.175709.json\n load_test_report_2025-01-29T133853.789246.csv\n load_test_report_2025-01-29T133853.789246.json\n load_test_report_2025-01-29T133929.633962.csv\n load_test_report_2025-01-29T133929.633962.json\n load_test_report_2025-01-29T134013.341083.csv\n load_test_report_2025-01-29T134013.341083.json\n load_test_report_2025-01-29T134101.336503.csv\n load_test_report_2025-01-29T134101.336503.json\n load_test_report_20250129_134106.csv\n 
load_test_report_20250129_134106.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_134818/\n load_test_report_2025-01-29T134919.598778.csv\n load_test_report_2025-01-29T134919.598778.json\n load_test_report_2025-01-29T135025.745361.csv\n load_test_report_2025-01-29T135025.745361.json\n load_test_report_2025-01-29T135131.347054.csv\n load_test_report_2025-01-29T135131.347054.json\n load_test_report_2025-01-29T135237.241605.csv\n load_test_report_2025-01-29T135237.241605.json\n load_test_report_2025-01-29T135352.526234.csv\n load_test_report_2025-01-29T135352.526234.json\n load_test_report_2025-01-29T135509.169860.csv\n load_test_report_2025-01-29T135509.169860.json\n load_test_report_20250129_135514.csv\n load_test_report_20250129_135514.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_135810/\n load_test_report_2025-01-29T135911.302460.csv\n load_test_report_2025-01-29T135911.302460.json\n load_test_report_2025-01-29T140017.766295.csv\n load_test_report_2025-01-29T140017.766295.json\n load_test_report_2025-01-29T140123.329253.csv\n load_test_report_2025-01-29T140123.329253.json\n load_test_report_2025-01-29T140229.087510.csv\n load_test_report_2025-01-29T140229.087510.json\n load_test_report_2025-01-29T140354.254251.csv\n load_test_report_2025-01-29T140354.254251.json\n load_test_report_2025-01-29T140522.596391.csv\n load_test_report_2025-01-29T140522.596391.json\n load_test_report_20250129_140527.csv\n load_test_report_20250129_140527.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_140726/\n load_test_report_2025-01-29T140828.249744.csv\n load_test_report_2025-01-29T140828.249744.json\n load_test_report_2025-01-29T140935.241087.csv\n load_test_report_2025-01-29T140935.241087.json\n load_test_report_2025-01-29T141041.737827.csv\n load_test_report_2025-01-29T141041.737827.json\n load_test_report_2025-01-29T141148.575547.csv\n load_test_report_2025-01-29T141148.575547.json\n load_test_report_2025-01-29T141257.979330.csv\n load_test_report_2025-01-29T141257.979330.json\n load_test_report_2025-01-29T141407.813467.csv\n load_test_report_2025-01-29T141407.813467.json\n load_test_report_2025-01-29T141517.031485.csv\n load_test_report_2025-01-29T141517.031485.json\n load_test_report_2025-01-29T141626.812125.csv\n load_test_report_2025-01-29T141626.812125.json\n load_test_report_2025-01-29T141738.980843.csv\n load_test_report_2025-01-29T141738.980843.json\n load_test_report_2025-01-29T141852.372524.csv\n load_test_report_2025-01-29T141852.372524.json\n load_test_report_2025-01-29T142006.313659.csv\n load_test_report_2025-01-29T142006.313659.json\n load_test_report_2025-01-29T142122.053494.csv\n load_test_report_2025-01-29T142122.053494.json\n load_test_report_20250129_142127.csv\n load_test_report_20250129_142127.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_142324/\n load_test_report_2025-01-29T142426.095040.csv\n 
load_test_report_2025-01-29T142426.095040.json\n load_test_report_2025-01-29T142532.101781.csv\n load_test_report_2025-01-29T142532.101781.json\n load_test_report_2025-01-29T142638.130364.csv\n load_test_report_2025-01-29T142638.130364.json\n load_test_report_2025-01-29T142744.373122.csv\n load_test_report_2025-01-29T142744.373122.json\n load_test_report_2025-01-29T142851.436595.csv\n load_test_report_2025-01-29T142851.436595.json\n load_test_report_2025-01-29T142958.649875.csv\n load_test_report_2025-01-29T142958.649875.json\n load_test_report_2025-01-29T143105.820377.csv\n load_test_report_2025-01-29T143105.820377.json\n load_test_report_2025-01-29T143213.483254.csv\n load_test_report_2025-01-29T143213.483254.json\n load_test_report_2025-01-29T143322.075349.csv\n load_test_report_2025-01-29T143322.075349.json\n load_test_report_2025-01-29T143431.160350.csv\n load_test_report_2025-01-29T143431.160350.json\n load_test_report_2025-01-29T143540.792112.csv\n load_test_report_2025-01-29T143540.792112.json\n load_test_report_2025-01-29T143651.193158.csv\n load_test_report_2025-01-29T143651.193158.json\n load_test_report_20250129_143656.csv\n load_test_report_20250129_143656.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_144231/\n load_test_report_2025-01-29T144333.225207.csv\n load_test_report_2025-01-29T144333.225207.json\n load_test_report_2025-01-29T144441.892228.csv\n load_test_report_2025-01-29T144441.892228.json\n load_test_report_2025-01-29T144548.216391.csv\n load_test_report_2025-01-29T144548.216391.json\n load_test_report_2025-01-29T144654.207507.csv\n load_test_report_2025-01-29T144654.207507.json\n load_test_report_2025-01-29T144801.887104.csv\n load_test_report_2025-01-29T144801.887104.json\n load_test_report_2025-01-29T144907.892024.csv\n load_test_report_2025-01-29T144907.892024.json\n load_test_report_2025-01-29T145015.606306.csv\n load_test_report_2025-01-29T145015.606306.json\n load_test_report_2025-01-29T145124.318365.csv\n load_test_report_2025-01-29T145124.318365.json\n load_test_report_2025-01-29T145232.316758.csv\n load_test_report_2025-01-29T145232.316758.json\n load_test_report_2025-01-29T145338.561407.csv\n load_test_report_2025-01-29T145338.561407.json\n load_test_report_2025-01-29T145447.340833.csv\n load_test_report_2025-01-29T145447.340833.json\n load_test_report_2025-01-29T145556.603603.csv\n load_test_report_2025-01-29T145556.603603.json\n load_test_report_20250129_145601.csv\n load_test_report_20250129_145601.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_145926/\n load_test_report_2025-01-29T150027.790900.csv\n load_test_report_2025-01-29T150027.790900.json\n load_test_report_2025-01-29T150134.652497.csv\n load_test_report_2025-01-29T150134.652497.json\n load_test_report_2025-01-29T150242.312479.csv\n load_test_report_2025-01-29T150242.312479.json\n load_test_report_2025-01-29T150348.489497.csv\n load_test_report_2025-01-29T150348.489497.json\n load_test_report_2025-01-29T150454.976232.csv\n 
load_test_report_2025-01-29T150454.976232.json\n load_test_report_2025-01-29T150600.673114.csv\n load_test_report_2025-01-29T150600.673114.json\n load_test_report_2025-01-29T150708.380006.csv\n load_test_report_2025-01-29T150708.380006.json\n load_test_report_2025-01-29T150814.575034.csv\n load_test_report_2025-01-29T150814.575034.json\n load_test_report_2025-01-29T150923.544283.csv\n load_test_report_2025-01-29T150923.544283.json\n load_test_report_2025-01-29T151030.283486.csv\n load_test_report_2025-01-29T151030.283486.json\n load_test_report_2025-01-29T151138.589944.csv\n load_test_report_2025-01-29T151138.589944.json\n load_test_report_2025-01-29T151248.730621.csv\n load_test_report_2025-01-29T151248.730621.json\n load_test_report_20250129_151253.csv\n load_test_report_20250129_151253.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_160612/\n load_test_report_2025-01-29T160713.432216.csv\n load_test_report_2025-01-29T160713.432216.json\n load_test_report_2025-01-29T160819.907680.csv\n load_test_report_2025-01-29T160819.907680.json\n load_test_report_2025-01-29T160926.784918.csv\n load_test_report_2025-01-29T160926.784918.json\n load_test_report_2025-01-29T161033.828339.csv\n load_test_report_2025-01-29T161033.828339.json\n load_test_report_2025-01-29T161153.205639.csv\n load_test_report_2025-01-29T161153.205639.json\n load_test_report_2025-01-29T161315.237414.csv\n load_test_report_2025-01-29T161315.237414.json\n load_test_report_20250129_161320.csv\n load_test_report_20250129_161320.json\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n run_20250129_161925/\n load_test_report_2025-01-29T162025.734114.csv\n load_test_report_2025-01-29T162025.734114.json\n load_test_report_2025-01-29T162131.524371.csv\n load_test_report_2025-01-29T162131.524371.json\n load_test_report_2025-01-29T162237.758517.csv\n load_test_report_2025-01-29T162237.758517.json\n load_test_report_2025-01-29T162344.818406.csv\n load_test_report_2025-01-29T162344.818406.json\n load_test_report_2025-01-29T162507.384913.csv\n load_test_report_2025-01-29T162507.384913.json\n load_test_report_2025-01-29T162613.335853.csv\n load_test_report_2025-01-29T162613.335853.json\n load_test_report_20250129_162618.csv\n load_test_report_20250129_162618.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_162732/\n load_test_report_2025-01-29T162834.272459.csv\n load_test_report_2025-01-29T162834.272459.json\n load_test_report_2025-01-29T162941.672408.csv\n load_test_report_2025-01-29T162941.672408.json\n load_test_report_2025-01-29T163048.857712.csv\n load_test_report_2025-01-29T163048.857712.json\n load_test_report_2025-01-29T163157.624546.csv\n load_test_report_2025-01-29T163157.624546.json\n load_test_report_2025-01-29T163306.370415.csv\n load_test_report_2025-01-29T163306.370415.json\n load_test_report_2025-01-29T163416.065472.csv\n load_test_report_2025-01-29T163416.065472.json\n load_test_report_2025-01-29T163524.604470.csv\n load_test_report_2025-01-29T163524.604470.json\n load_test_report_2025-01-29T163632.880248.csv\n 
load_test_report_2025-01-29T163632.880248.json\n load_test_report_2025-01-29T163745.002002.csv\n load_test_report_2025-01-29T163745.002002.json\n load_test_report_2025-01-29T163902.036068.csv\n load_test_report_2025-01-29T163902.036068.json\n load_test_report_2025-01-29T164009.453151.csv\n load_test_report_2025-01-29T164009.453151.json\n load_test_report_2025-01-29T164122.568066.csv\n load_test_report_2025-01-29T164122.568066.json\n load_test_report_20250129_164127.csv\n load_test_report_20250129_164127.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_164620/\n load_test_report_2025-01-29T164721.700661.csv\n load_test_report_2025-01-29T164721.700661.json\n load_test_report_2025-01-29T164827.520353.csv\n load_test_report_2025-01-29T164827.520353.json\n load_test_report_2025-01-29T164933.310367.csv\n load_test_report_2025-01-29T164933.310367.json\n load_test_report_2025-01-29T165039.642351.csv\n load_test_report_2025-01-29T165039.642351.json\n load_test_report_2025-01-29T165154.098239.csv\n load_test_report_2025-01-29T165154.098239.json\n load_test_report_2025-01-29T165308.831481.csv\n load_test_report_2025-01-29T165308.831481.json\n load_test_report_20250129_165313.csv\n load_test_report_20250129_165313.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_165758/\n load_test_report_2025-01-29T165859.461686.csv\n load_test_report_2025-01-29T165859.461686.json\n load_test_report_2025-01-29T170005.472004.csv\n load_test_report_2025-01-29T170005.472004.json\n load_test_report_2025-01-29T170111.422122.csv\n load_test_report_2025-01-29T170111.422122.json\n load_test_report_2025-01-29T170217.557618.csv\n load_test_report_2025-01-29T170217.557618.json\n load_test_report_2025-01-29T170330.493971.csv\n load_test_report_2025-01-29T170330.493971.json\n load_test_report_2025-01-29T170447.558129.csv\n load_test_report_2025-01-29T170447.558129.json\n load_test_report_20250129_170452.csv\n load_test_report_20250129_170452.json\n results_test_u1_o1.csv\n results_test_u1_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o50.csv\n run_20250129_170950/\n load_test_report_2025-01-29T171051.361008.csv\n load_test_report_2025-01-29T171051.361008.json\n load_test_report_2025-01-29T171157.323565.csv\n load_test_report_2025-01-29T171157.323565.json\n load_test_report_2025-01-29T171303.299586.csv\n load_test_report_2025-01-29T171303.299586.json\n load_test_report_2025-01-29T171409.108765.csv\n load_test_report_2025-01-29T171409.108765.json\n load_test_report_2025-01-29T171514.861147.csv\n load_test_report_2025-01-29T171514.861147.json\n load_test_report_2025-01-29T171620.615624.csv\n load_test_report_2025-01-29T171620.615624.json\n load_test_report_2025-01-29T171726.893447.csv\n load_test_report_2025-01-29T171726.893447.json\n load_test_report_2025-01-29T171833.044767.csv\n load_test_report_2025-01-29T171833.044767.json\n load_test_report_2025-01-29T171939.151837.csv\n load_test_report_2025-01-29T171939.151837.json\n load_test_report_2025-01-29T172045.358719.csv\n load_test_report_2025-01-29T172045.358719.json\n load_test_report_2025-01-29T172151.647824.csv\n 
load_test_report_2025-01-29T172151.647824.json\n load_test_report_2025-01-29T172257.931381.csv\n load_test_report_2025-01-29T172257.931381.json\n load_test_report_2025-01-29T172404.993732.csv\n load_test_report_2025-01-29T172404.993732.json\n load_test_report_2025-01-29T172512.469972.csv\n load_test_report_2025-01-29T172512.469972.json\n load_test_report_2025-01-29T172619.912159.csv\n load_test_report_2025-01-29T172619.912159.json\n load_test_report_2025-01-29T172727.520335.csv\n load_test_report_2025-01-29T172727.520335.json\n load_test_report_2025-01-29T172836.287202.csv\n load_test_report_2025-01-29T172836.287202.json\n load_test_report_2025-01-29T172945.243054.csv\n load_test_report_2025-01-29T172945.243054.json\n load_test_report_2025-01-29T173054.878245.csv\n load_test_report_2025-01-29T173054.878245.json\n load_test_report_2025-01-29T173205.270695.csv\n load_test_report_2025-01-29T173205.270695.json\n load_test_report_2025-01-29T173319.135777.csv\n load_test_report_2025-01-29T173319.135777.json\n load_test_report_2025-01-29T173434.082094.csv\n load_test_report_2025-01-29T173434.082094.json\n load_test_report_2025-01-29T173550.513858.csv\n load_test_report_2025-01-29T173550.513858.json\n load_test_report_2025-01-29T173708.906195.csv\n load_test_report_2025-01-29T173708.906195.json\n load_test_report_20250129_173713.csv\n load_test_report_20250129_173713.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u1_o1.csv\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u1_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n results_test_u50_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n run_20250129_174215/\n load_test_report_2025-01-29T174316.520550.csv\n load_test_report_2025-01-29T174316.520550.json\n load_test_report_2025-01-29T174422.384594.csv\n load_test_report_2025-01-29T174422.384594.json\n load_test_report_2025-01-29T174528.291764.csv\n load_test_report_2025-01-29T174528.291764.json\n load_test_report_2025-01-29T174633.925509.csv\n load_test_report_2025-01-29T174633.925509.json\n load_test_report_2025-01-29T174740.096886.csv\n load_test_report_2025-01-29T174740.096886.json\n load_test_report_2025-01-29T174845.697959.csv\n load_test_report_2025-01-29T174845.697959.json\n load_test_report_2025-01-29T174952.084484.csv\n load_test_report_2025-01-29T174952.084484.json\n load_test_report_2025-01-29T175058.845237.csv\n load_test_report_2025-01-29T175058.845237.json\n load_test_report_2025-01-29T175205.494738.csv\n load_test_report_2025-01-29T175205.494738.json\n load_test_report_2025-01-29T175312.831611.csv\n load_test_report_2025-01-29T175312.831611.json\n load_test_report_2025-01-29T175419.902976.csv\n load_test_report_2025-01-29T175419.902976.json\n load_test_report_2025-01-29T175527.241889.csv\n load_test_report_2025-01-29T175527.241889.json\n load_test_report_2025-01-29T175635.835204.csv\n load_test_report_2025-01-29T175635.835204.json\n load_test_report_2025-01-29T175744.448069.csv\n load_test_report_2025-01-29T175744.448069.json\n load_test_report_2025-01-29T175853.905293.csv\n load_test_report_2025-01-29T175853.905293.json\n load_test_report_2025-01-29T180003.565666.csv\n 
load_test_report_2025-01-29T180003.565666.json\n load_test_report_2025-01-29T180115.557518.csv\n load_test_report_2025-01-29T180115.557518.json\n load_test_report_2025-01-29T180228.466492.csv\n load_test_report_2025-01-29T180228.466492.json\n load_test_report_2025-01-29T180342.419821.csv\n load_test_report_2025-01-29T180342.419821.json\n load_test_report_2025-01-29T180457.796778.csv\n load_test_report_2025-01-29T180457.796778.json\n load_test_report_2025-01-29T180620.304565.csv\n load_test_report_2025-01-29T180620.304565.json\n load_test_report_2025-01-29T180746.057385.csv\n load_test_report_2025-01-29T180746.057385.json\n load_test_report_2025-01-29T180914.893498.csv\n load_test_report_2025-01-29T180914.893498.json\n load_test_report_2025-01-29T181046.064760.csv\n load_test_report_2025-01-29T181046.064760.json\n load_test_report_20250129_181051.csv\n load_test_report_20250129_181051.json\n results_test_u10_o1.csv\n results_test_u10_o15.csv\n results_test_u10_o30.csv\n results_test_u10_o50.csv\n results_test_u1_o1.csv\n results_test_u1_o15.csv\n results_test_u1_o30.csv\n results_test_u1_o50.csv\n results_test_u20_o1.csv\n results_test_u20_o15.csv\n results_test_u20_o30.csv\n results_test_u20_o50.csv\n results_test_u2_o1.csv\n results_test_u2_o15.csv\n results_test_u2_o30.csv\n results_test_u2_o50.csv\n results_test_u50_o1.csv\n results_test_u50_o15.csv\n results_test_u50_o30.csv\n results_test_u50_o50.csv\n results_test_u5_o1.csv\n results_test_u5_o15.csv\n results_test_u5_o30.csv\n results_test_u5_o50.csv\n starcoder2_run_20250129_123907.zip\n starcoder_run_20250128_20.zip\n starcoder_run_20250129_131828.zip\n test_single_request.py\n visualize_results.py\n temp_file_renamed.txt\n test_data.txt", + tool_failed: false, }, { role: "assistant", @@ -63,12 +61,10 @@ export const MARKDOWN_ISSUE: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01DzbsASaCkQXY2fmUYZjXbj", - content: - 'Problems:\nThe path "README.md" is ambiguous. It could be interpreted as:\n/home/fupfv/git/benchmark1_0701/README.md\n/home/fupfv/git/benchmark1_0701/llm_load_test/README.md\n', - tool_failed: false, - }, + tool_call_id: "toolu_01DzbsASaCkQXY2fmUYZjXbj", + content: + 'Problems:\nThe path "README.md" is ambiguous. 
It could be interpreted as:\n/home/fupfv/git/benchmark1_0701/README.md\n/home/fupfv/git/benchmark1_0701/llm_load_test/README.md\n', + tool_failed: false, }, { role: "assistant", @@ -89,11 +85,9 @@ export const MARKDOWN_ISSUE: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01LUEfJUST74ghMbHUmuByr5", - content: "Paths found:\n/home/fupfv/git/benchmark1_0701/README.md\n", - tool_failed: false, - }, + tool_call_id: "toolu_01LUEfJUST74ghMbHUmuByr5", + content: "Paths found:\n/home/fupfv/git/benchmark1_0701/README.md\n", + tool_failed: false, }, { role: "context_file", @@ -127,12 +121,10 @@ export const MARKDOWN_ISSUE: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_019uJg1SQ5evTU9bov6iS9hU", - content: - "Paths found:\n/home/fupfv/git/benchmark1_0701/VISUALIZATION.md\n", - tool_failed: false, - }, + tool_call_id: "toolu_019uJg1SQ5evTU9bov6iS9hU", + content: + "Paths found:\n/home/fupfv/git/benchmark1_0701/VISUALIZATION.md\n", + tool_failed: false, }, { role: "context_file", @@ -166,12 +158,10 @@ export const MARKDOWN_ISSUE: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01T48o7umn3FG5JVJWRgviUP", - content: - "Paths found:\n/home/fupfv/git/benchmark1_0701/src/llm_load_test_runner.py\n", - tool_failed: false, - }, + tool_call_id: "toolu_01T48o7umn3FG5JVJWRgviUP", + content: + "Paths found:\n/home/fupfv/git/benchmark1_0701/src/llm_load_test_runner.py\n", + tool_failed: false, }, { role: "context_file", @@ -205,12 +195,10 @@ export const MARKDOWN_ISSUE: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01WXJiwA9oLdRRAS4SADKvmM", - content: - "Paths found:\n/home/fupfv/git/benchmark1_0701/src/load_test.py\n", - tool_failed: false, - }, + tool_call_id: "toolu_01WXJiwA9oLdRRAS4SADKvmM", + content: + "Paths found:\n/home/fupfv/git/benchmark1_0701/src/load_test.py\n", + tool_failed: false, }, { role: "context_file", @@ -244,12 +232,10 @@ export const MARKDOWN_ISSUE: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01VxiuWGqPfbMSgprqkhe7uU", - content: - "Paths found:\n/home/fupfv/git/benchmark1_0701/src/visualize_results.py\n", - tool_failed: false, - }, + tool_call_id: "toolu_01VxiuWGqPfbMSgprqkhe7uU", + content: + "Paths found:\n/home/fupfv/git/benchmark1_0701/src/visualize_results.py\n", + tool_failed: false, }, { role: "context_file", @@ -459,12 +445,10 @@ export const MARKDOWN_ISSUE: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "toolu_01QS5KdB2vJApy8dgHkzjmyA", - content: - "Knowledge entry created successfully with ID: 594ad5ff35\nTools: documentation, markdown, technical writing\nApply to: benchmark1_0701, LLM Load Testing Tool, documentation, visualization tools, developer guide\nSearch Key: Creating comprehensive documentation for LLM load testing tool with Locust, including user guides, visualization documentation, and developer documentation\nLanguage/Framework: python/locust\nEntry: I created a comprehensive documentation suite for an LLM Load Testing Tool built with Python and Locust. The documentation was structured into several key components:\n\n1. Main Documentation (README.md):\n - Provided a complete overview of the tool's functionality\n - Included installation instructions, configuration options, and usage examples\n - Documented all available metrics and how to interpret them\n - Covered troubleshooting and advanced usage scenarios\n\n2. 
Visualization Guide (VISUALIZATION_GUIDE.md):\n - Detailed explanation of all visualization tools (visualize_results.py, compare_runs.py, dashboard_generator.py)\n - Instructions for generating and interpreting different types of visualizations\n - Best practices for effective visualization and analysis\n - Troubleshooting for visualization-specific issues\n\n3. Developer Guide (DEVELOPER_GUIDE.md):\n - Technical architecture overview of the codebase\n - Instructions for extending the tool with new providers, metrics, and visualizations\n - Code style guidelines and testing recommendations\n - Contributing guidelines for potential contributors\n\n4. Quick Start Guide (QUICK_START.md):\n - Streamlined instructions for getting started quickly\n - Basic configuration and test execution steps\n - Common issues and solutions\n - Pointers to more detailed documentation\n\n5. Documentation Index (index.md):\n - Central navigation hub for all documentation\n - Brief overview of the tool and its capabilities\n - Links to all documentation sections\n\nThe documentation was designed to be comprehensive yet accessible, with clear navigation between different sections. It covers both user-focused aspects (how to run tests and interpret results) and developer-focused aspects (how to extend and contribute to the codebase).\n\nKey insights from analyzing the codebase:\n- The tool uses Locust for load generation and metrics collection\n- It supports multiple LLM providers through a provider class hierarchy\n- Visualization is handled by matplotlib and seaborn\n- The architecture separates concerns between test execution, data collection, and visualization\n- The tool generates both CSV and JSON reports for further analysis\n\nThis documentation structure provides a solid foundation for users to understand and effectively use the LLM Load Testing Tool, while also enabling developers to extend and improve it.", - tool_failed: false, - }, + tool_call_id: "toolu_01QS5KdB2vJApy8dgHkzjmyA", + content: + "Knowledge entry created successfully with ID: 594ad5ff35\nTools: documentation, markdown, technical writing\nApply to: benchmark1_0701, LLM Load Testing Tool, documentation, visualization tools, developer guide\nSearch Key: Creating comprehensive documentation for LLM load testing tool with Locust, including user guides, visualization documentation, and developer documentation\nLanguage/Framework: python/locust\nEntry: I created a comprehensive documentation suite for an LLM Load Testing Tool built with Python and Locust. The documentation was structured into several key components:\n\n1. Main Documentation (README.md):\n - Provided a complete overview of the tool's functionality\n - Included installation instructions, configuration options, and usage examples\n - Documented all available metrics and how to interpret them\n - Covered troubleshooting and advanced usage scenarios\n\n2. Visualization Guide (VISUALIZATION_GUIDE.md):\n - Detailed explanation of all visualization tools (visualize_results.py, compare_runs.py, dashboard_generator.py)\n - Instructions for generating and interpreting different types of visualizations\n - Best practices for effective visualization and analysis\n - Troubleshooting for visualization-specific issues\n\n3. 
Developer Guide (DEVELOPER_GUIDE.md):\n - Technical architecture overview of the codebase\n - Instructions for extending the tool with new providers, metrics, and visualizations\n - Code style guidelines and testing recommendations\n - Contributing guidelines for potential contributors\n\n4. Quick Start Guide (QUICK_START.md):\n - Streamlined instructions for getting started quickly\n - Basic configuration and test execution steps\n - Common issues and solutions\n - Pointers to more detailed documentation\n\n5. Documentation Index (index.md):\n - Central navigation hub for all documentation\n - Brief overview of the tool and its capabilities\n - Links to all documentation sections\n\nThe documentation was designed to be comprehensive yet accessible, with clear navigation between different sections. It covers both user-focused aspects (how to run tests and interpret results) and developer-focused aspects (how to extend and contribute to the codebase).\n\nKey insights from analyzing the codebase:\n- The tool uses Locust for load generation and metrics collection\n- It supports multiple LLM providers through a provider class hierarchy\n- Visualization is handled by matplotlib and seaborn\n- The architecture separates concerns between test execution, data collection, and visualization\n- The tool generates both CSV and JSON reports for further analysis\n\nThis documentation structure provides a solid foundation for users to understand and effectively use the LLM Load Testing Tool, while also enabling developers to extend and improve it.", + tool_failed: false, }, { role: "assistant", @@ -499,7 +483,6 @@ export const MARKDOWN_ISSUE: ChatThread = { }, currentMaximumContextTokens: 64000, currentMessageContextTokens: 1, - read: true, isTitleGenerated: true, createdAt: "2025-03-04T06:49:31.563Z", updatedAt: "2025-03-04T06:49:31.563Z", diff --git a/refact-agent/gui/src/__fixtures__/msw.ts b/refact-agent/gui/src/__fixtures__/msw.ts index 7d8a1449c..6f01a906a 100644 --- a/refact-agent/gui/src/__fixtures__/msw.ts +++ b/refact-agent/gui/src/__fixtures__/msw.ts @@ -2,15 +2,10 @@ import { http, HttpResponse, type HttpHandler } from "msw"; import { EMPTY_CAPS_RESPONSE, STUB_CAPS_RESPONSE } from "./caps"; import { SYSTEM_PROMPTS } from "./prompts"; import { STUB_LINKS_FOR_CHAT_RESPONSE } from "./chat_links_response"; -import { - TOOLS, - CHAT_LINKS_URL, - KNOWLEDGE_CREATE_URL, -} from "../services/refact/consts"; +import { TOOLS, CHAT_LINKS_URL } from "../services/refact/consts"; import { STUB_TOOL_RESPONSE } from "./tools_response"; import { GoodPollingResponse } from "../services/smallcloud/types"; import type { LinksForChatResponse } from "../services/refact/links"; -import { SaveTrajectoryResponse } from "../services/refact/knowledge"; import { ToolConfirmationResponse } from "../services/refact"; export const goodPing: HttpHandler = http.get( @@ -136,17 +131,6 @@ export const goodTools: HttpHandler = http.get( }, ); -export const makeKnowledgeFromChat: HttpHandler = http.post( - `http://127.0.0.1:8001${KNOWLEDGE_CREATE_URL}`, - () => { - const result: SaveTrajectoryResponse = { - memid: "foo", - trajectory: "something", - }; - return HttpResponse.json(result); - }, -); - export const loginPollingGood: HttpHandler = http.get( "https://www.smallcloud.ai/v1/streamlined-login-recall-ticket", () => { @@ -235,3 +219,102 @@ export const ToolConfirmation = http.post( return HttpResponse.json(response); }, ); + +export const emptyTrajectories: HttpHandler = http.get( + "http://127.0.0.1:8001/v1/trajectories", + 
() => { + return HttpResponse.json([]); + }, +); + +export const trajectoryGet: HttpHandler = http.get( + "http://127.0.0.1:8001/v1/trajectories/:id", + () => { + return HttpResponse.json({ status: "not_found" }, { status: 404 }); + }, +); + +export const trajectorySave: HttpHandler = http.put( + "http://127.0.0.1:8001/v1/trajectories/:id", + () => { + return HttpResponse.json({ status: "ok" }); + }, +); + +export const trajectoryDelete: HttpHandler = http.delete( + "http://127.0.0.1:8001/v1/trajectories/:id", + () => { + return HttpResponse.json({ status: "ok" }); + }, +); + +// Chat Session (Stateless Trajectory UI) handlers +export const chatSessionSubscribe: HttpHandler = http.get( + "http://127.0.0.1:8001/v1/chats/subscribe", + () => { + // Return an SSE stream that stays open and emits no events + const encoder = new TextEncoder(); + const stream = new ReadableStream({ + start(controller) { + // Send a keep-alive comment; the stream stays open + controller.enqueue(encoder.encode(": keep-alive\n\n")); + // Don't close - let the client handle disconnection + }, + }); + return new HttpResponse(stream, { + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); + }, +); + +export const chatSessionCommand: HttpHandler = http.post( + "http://127.0.0.1:8001/v1/chats/:id/commands", + () => { + return HttpResponse.json({ status: "queued" }); + }, +); + +export const chatSessionAbort: HttpHandler = http.post( + "http://127.0.0.1:8001/v1/chats/:id/abort", + () => { + return HttpResponse.json({ status: "ok" }); + }, +); + +// Sidebar subscription endpoint (SSE) +export const sidebarSubscribe: HttpHandler = http.get( + "http://127.0.0.1:8001/v1/sidebar/subscribe", + () => { + const encoder = new TextEncoder(); + const stream = new ReadableStream({ + start(controller) { + // Send initial snapshot with empty data + const snapshot = JSON.stringify({ + category: "snapshot", + trajectories: [], + tasks: [], + }); + controller.enqueue(encoder.encode(`data: ${snapshot}\n\n`)); + }, + }); + return new HttpResponse(stream, { + headers: { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + Connection: "keep-alive", + }, + }); + }, +); + +// Tasks list endpoint +export const emptyTasks: HttpHandler = http.get( + "http://127.0.0.1:8001/v1/tasks", + () => { + return HttpResponse.json([]); + }, +); diff --git a/refact-agent/gui/src/__fixtures__/some_chrome_screenshots.ts b/refact-agent/gui/src/__fixtures__/some_chrome_screenshots.ts index 7acbf9afe..7f3f1f264 100644 --- a/refact-agent/gui/src/__fixtures__/some_chrome_screenshots.ts +++ b/refact-agent/gui/src/__fixtures__/some_chrome_screenshots.ts @@ -27,12 +27,10 @@ export const CHAT_WITH_MULTI_MODAL: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_leDATFRCQJRefjC45EVpS0TW", - content: - "/\n Users/\n kot/\n code_aprojects/\n huddle/\n .gitignore\n README-template.md\n README.md\n index.html\n style-guide.md\n styles.css\n images/\n bg-desktop.svg\n bg-mobile.svg\n favicon-32x32.png\n illustration-mockups.svg\n logo.svg\n design/\n active-states.jpg\n desktop-design.jpg\n desktop-preview.jpg\n mobile-design.jpg", - tool_failed: false, - }, + tool_call_id: "call_leDATFRCQJRefjC45EVpS0TW", + content: + "/\n Users/\n kot/\n code_aprojects/\n huddle/\n .gitignore\n README-template.md\n README.md\n index.html\n style-guide.md\n styles.css\n images/\n bg-desktop.svg\n bg-mobile.svg\n favicon-32x32.png\n illustration-mockups.svg\n logo.svg\n design/\n 
active-states.jpg\n desktop-design.jpg\n desktop-preview.jpg\n mobile-design.jpg", + tool_failed: false, }, { role: "assistant", @@ -52,27 +50,25 @@ export const CHAT_WITH_MULTI_MODAL: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_035coU8EfPMCt5kyzdjGP1Me", - content: [ - { - m_type: "text", - m_content: - "Start new chrome process.\nNo opened tabs.\nopened a new tab: tab_id `1` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `1` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `1` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nopened a new tab: tab_id `2` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `2` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `2` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\n test tripple ticks \n```\nstuff\n```\n might escape", - }, - { - m_type: "image/jpeg", - m_content: - "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAGYAyADAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs2VjcahMYrZAzKpdizBVVR1JY8AfWonUUFdgaP9jWEH/H7r9mrd0tUe4YfiAF/Ws/bTfwxfz0FcBbeG/unU9Tz/e+xJj8t+afNX/lX3/8AAHqRz6TbvaT3Onail2kCh5Y2haKRVJA3YOQRkjODxmhVZcyU1a4XK2laVda1qMdjZKjTyAlQ7bRwMnmrq1I0o80tgbsS61od94fvVtL9I1lZBINj7htJI6/gamjWjVjzRBO5dTwdrEmg/wBtLFD9i8ozZ80bto9qzeKpqp7PqF1sYGQO4rpAKACgAoA7i18C20/gU6+b2YT/AGd5hEFG35SePXtXBLFyVf2VtLk31scPXeUafh/TE1nXrPTpJWjSd9pdQCQME8Z+lZVqjp03NdAehva/4Mt9I8T6TpUV3K8d8VDO6jKZfbxiuajipTpSm1sJPQj8b+EbfwqbL7PdTTi4358xQNu3HTH1qsJiZVr8y2BO5yXeuwYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGvpnGga63cxwL+Blyf5CsJ/xYfP8g6irPov/AAjgiaCQ6n5uS4U9N3Zs4xtyMYznnNFqvtb390Nbl6S48LNrkbRW0i2AgKkOj7fMzwSobccLwcEZPOMVly4jk1eotStYmAReI5rZXW1+yskQc5YK0qBQffFaTv7ilvf9AL/w4/5Hmy/3Jf8A0A1nj/4DG9juvGPgW78TaxFewX
sECpAIiroxOQSc8fWuDDYpUY8trkJ2Lt7pj6N8MbrTpZFke3sXQuoIB6+tRCftMQpd2G7MnwHa28nw+uXeCJm3T/MyAnp61ri5NYjR9hvc4/4aRRzeL4FlRXX7PIcMMjOBXZj21R0HLY2/EXh+LWfijDpyqIYGt0kmMahflAOce54Fc9Cs6eGcuok7I6DVfEXhnwdImjrpu/5QZI4YlIVT/eLdSaxp0a2I9+4JNl6+fT5PhzevpQVbF7KRolUYCg5JGO2DnjtWcFNYhKe90T1OW8A+G9Ni0STxHq0ccije0YkGVjRerY7nIP5V1YyvNz9lAqT6G1pPi7w54j1y2tlsnhuonLWkskarkgHIBB44zwetY1MNWpQbvp1E00UPG3/JRPDH++n/AKNFa4b/AHeoC2E+KVpJf3/h+zh/1k8kka59SUFLAyUYzk+lv1GjbGm2ng3TYY9L0GfU7l+HeNFLH1ZmPT2ArBzlXk3OVkTuZfijwzZ654al1iDTX07UYozK0boEZtvVWA4PGcGtcPiJU6ig3dDTszyGvZLCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKALthqTWC3CGCG4gnQLLFLnDYOQcgggg+9Z1KfPZ3s0BualpOm6bNLfXNu4tXSMW1okpBkkMas53HJCLu+pJA9a54Vak1yJ663fzFczhqOjKP+RfDH/avpD/SteSr/AD/gGpHd6uk1k9nZ6db2MMjq8vls7tIV+6CWJ4GegpxotS55O7HY2Phv/wAjxZf7kv8A6Aayx38FilsbvxK1nU9O8SQQ2WoXNvGbVWKRSFQTubniufA0YTptyV9RRWh0EdxNd/CJ57iV5Zn09yzucsx56mublUcVZdxdSn8MLq3vPDN3pZfE0cjllzzscdR+oq8fFxqqQ5bknhTwI3hjXDf3WoRSLtMNuqgqWLeue+B0FLEYv20OVL1E3cqatq0Gj/FyGe5YJBJaJC7nou7OCfbIFXTpueEaW9xpXRN4u+H91r+t/wBp2F3AgmVRIsueCBjIIBzxjilhsYqUOSS2BSsbF1pUeifDe906KXzRBZygv/ebkt9OSeKxjUdTEKb6tCvdmP4FuLTX/A8/h+SXZNGjxMB97YxJDAd8E/pW2LjKlXVRbDejuQeGvhxc6Rr0GoX99btFbvuiWLOXboM5HH05p18cqlNxitwcrj/G3/JRPDH++n/o0U8N/u9QS2JPiTfHTNZ8N323d9nmkkK+oBTI/KpwMOeFSPf/AIII39Vn1nVNOtb7wrf2hjcEsJkyHB6YPYjuDXPTVOEnGsmCt1Oa8UT+LtJ8Mm4vdVsH84mGaKOAAhWGPlJ6n144rpw6oVKtoxeg1a55T0r1ygoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs3V9dXxiN1cSTeUgjj3nO1R0AqYU4w2QFaqAKAJ7S7ubC5W4tJ5IJlztkjbBGevNTKKkrSV0A+91C81KYTXtzLcShdoeVtxA9P1ohCMFaKsBMuuaqun/YF1C5Fnt2eQJDs2+mPSo9jT5ua2oWK1reXNhcLcWk8kEy/deNsEVcoxkrSVwLlz4h1m8nhmuNTupJIG3RMX+4fUY6H3rONClFNKO4WRUvL261C4NxeXEk8xABeRsnA6CtIwjBWirAXLXxJrVja/ZbXVLqKDGAiycAe3p+FRKhTk7uKuFkQprWpx2L2Sahci1fO6ESHac8nI96HRpuXNbULFa3uJrSdZ7eaSGVDlXjYqw/EVpKKkrNAX7rxHrV60DXOp3UjQMHiJfG1h3GO/vWUcPSje0dwsiC51fUby6iurm+uJbiHHlyO5LJg5GD25q40oRTilowsJf6rqGqFDf3s9yY87PNfdtz1xRClCHwqwWHafrGpaUW+wX09tu+8I3wD9R0pTown8SuFhl/ql/qkolv7ya5deAZWzj6DoKcKUYK0VYLFSrAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAC
[... base64-encoded image/jpeg payloads elided; the diff lines here remove inline `m_type: "image/jpeg"` / `m_content: "..."` entries whose contents are raw JPEG data ...]
F2u7XGzWtYtK1G2l1iTQvIW0s5ZXgjLLb3LhwqlecgDPzfhWMpVIS9nz3u7X6oPIuaRpukay+lajNpcMCTPcxT20TMI5PLjLB1ycj069azqVKlNSgpX21+YnoR6VZaRrttpN5/ZEFru1UWkkUTsVkjMZYbsnr705zqU3KPM3oFyFLDS9dsbxLbTYdPe11CC3jljdmLJI5U78nk8ZquapSkryvdXDYt6no2iGHVbKJdMhks1P2d7eeSS43KwBEoIxz39DWUK1Vcs9de+wXZT1VdI0/UdR0WPw+JxYxbluELGUuoUlpOcbDnBx0HStYe0lGNTntd7f11DU0vEFvbalqWug28cUsVrZBZEZursgywzg4BwPYVjSlKEYtd2BSmstIuNW1fw/FpMUAsbeZorxXYzb41B3Pk4IPpjvWilUUY1XLdrQCxDY6HNrVpof9jQgXGnLNJc+Y/mLIYt4K84A4/HNJzq8jq82ztbyuGpDp2maVeaPZ29tY2VzeSWu+eGeV4bwyEE7os/KV6EDuKc6lSM3d2SfTb5gc/wCF7S21DVXsLqFZHuLeWOEnI2TbcqR75GPxrpxEpRgpp9hs6qXwvpVtb2t09sHTTbaT+1FJOHmESuoPPq+O3SuP6xUk3G/xPT0Fcdbi10211ALZQyb/AA3FO/mM53EnlevCnrx6cVMrykm39qwEkiabqOvaNo11pcMputMi33TOwkT92xXZg4GMfjmqXPGE5xk9GBxnhmGxuPEFvaahGHt7gmDcTjYzDCt+BxXbiHJU7x3WpTOisPDVjbR2FnqdrvvNtze3C7irNFECqx+wYgn1rlniJu8oOy0X37k3uM0q00vxBFp962k29oV1OK1ljgZvLmjdScEE9RjqOuadSVSi5RUr6fcFzOsNMtJdL1WaS3Vnh1K3gjJJ+VWkIZfxGK1qVJKUY36P8gbK/iw2MWuXNjp+nxWkNpM8e5WLNJz1OfTnHtV4ZS5FOUtxrYwTXQMKACgAoAKACgAoAt6cLY6hD9ruZLaANuaWOPey45GB9azq83K1DcDU1bxRd3fiyXW7OV4XDYgzglUAwAR0ORnI9zWdPDxjS9nL5hbQguvE2p3Utq/mRQC1k82FLaJY0V/72B1P1ojhoRurbhYlu/F2sXkPkySwJF5qzFIrdEBkU5DHA656+tEcJSTv8hWRkXdzLe3c11cMGmmcySNjGWJyeK2hBRjyrYZtW/jPWrWOBYpYA8ChFlNuhkKDohYjJX2rB4Sm29Nwshlp4v1iyhSKCaBRG7PETboTFuOSEJHyg+lEsJTk72FZDLXxTqtnHKkcsLh5WnHmwK/lyE5LJkfKfpTnhqUtbBZEM/iLVbguz3XzPdC8LKgU+aBtDAjpx26VSw9OLtbbQdkT3virVb+ERSSQRqZFlk8mBY/NcHIZ8D5uamGFpxd7BZFQ61ftcahOZh5moIyXJ2D5wTk/Tkdq09jCyjbRbBYtT+KtXubB7OWdCskYiklESiWRB0VnxkiojhaSlzWFZDpfFusS2T2zTx5ePyXnEKiZ0/ul8ZIpLC01LmsFkZl3f3F6lsk7KVtohDFhAuFHY46/U1tGCg3Zb6jsXtN8Salpdq1pC8MtsW3iG4hWVVb1APQ1lUw0Kj5mtQsMk8QapNeXV1LdF5rqA28pKjHln+EDGFHHamsPBJRtsFiTTfE2paXbLbwNA8SOZIhPAsnlN/eTPQ0VMPCpLme7Cw608U6raRTIJo5vNlM5a4hWUrIerqWHBpSw1OVrLYVkE3inVZ5LuSWWF2vIVgnzCv7wLnBPH3uetJYWmreQWQ6XxbrE1i9q88XzxeTJOIVEzx9NpfqRQsLTTv8AqFkZ2naldaVdi6s5dkoUqcqGDKeoIPBB9K1qU41FaYzQPivV/t8V2s8aGKNokiSFViCN95dmMYPesvqtPl5bCshJfFOqyXkFyJYojbxvHDHFCqxxqww2FxjnPXrQsNSUbNDsitY63qGmwQwWswSOG4F0gKA4kC7QefbtVzown8S12+QWIo9UvIra6t0l2x3UiySgKMllJIIPbknpVOlBtNrYLF+98VarqFnJbTSwgTACeSOFUkmA6b2AyayhhqcJc1gshtz4p1a7sHs5p4ysiCOWQRKJZEHRWfGSKI4anGXNYLIZdeJNTvIZIppkIlhSCQrEoZ1QgrkjnIwOaccNTi72CyJbvxXq95ZyW000X75BHNMsKrLKo7M4GSKUcLTg+a2wWKya9qKanHqKzKLqOIQq+wcIF2Yx06cVfsI8vJbfULFq38W6rbWUVtHJb5hj8mGdoFM0af3Vc8jrWbwtOUub5hYybW6msruG6t32TQuHRsZwR0rolGM001ox2LsviDU5odQhe5Jj1GQSXI2gb2H8vwrJUKas7fCKw+HxJqcNwJlmjZhaiz2vErKYh0UjGD9aHhqbVrdbhYYmv6kmpW2orOBdW0SwxP5a/KgBUDGMHgmn7Cm4uHRgVtPFs+oRfa7mS2h3bmljj3suORgfWnUvyPlVwNbWPFF1e+LJNbs5XhdG2wE4yqAYAI6c85HvWdPDxVL2cgS0K934m1O7e2bzYrdbaTzoktoViVZP72B1P1pww1ON+twsiS88WavfQGCWWBYjIsxSK3RAXU5DHA656+tKGFpwdwsjJu7qa+vJru4bdNM5kkYDGWPJ4FbRioxUVsBDVAFABQAUAFABQAUABOAT6DNAHRt4XC+I00n7WcNafafM8v8A6ZGTGM+2M1y/WH7NTt1t+Irk3/CKW0Wi215c6hLFLc232iN/sxa3HGQjSA8N+HepWKlzuKV7PvqFyWy8FfaIbOKe7nhv72ISwotozxICMqHkHQn9KmWMs3ZaLz/QLlePwtB/ZdhPc6l5N5fyvBDbmLIDrJsO5s8KPWreJlzNRV0tQuR+IPDtro0biO9uGnil8t4rm1MPmD+/GckMtOjiHUeq09fzBO5V0TSbXUUnkubqdPLKqsNrbmaWQnuF7AdzV1qsoNJLf5DZrr4J2anqUE91cNBZRRyn7PbF5pBIMj93njHOfSsfrnuxaWr7vQVzndUs4LC/eCC7FzCAGEgQqQD2Know7iuilNzhdr+vId9DYl8KCHU76E3hNnbWQvVuRH/rFYDYAM9STjr2rFYq8E0tW7WFcmPhG2Fy+lf2of7cSEym38j91uC7jHvz97Htil9alZT5fd9fxC5Vg8MifWdF0/7WQNStkn3+X/q9wJxjPPSqeJtCU7bOw7mZpOlzazq0GnW5USSsRubooAJJP0ANbVaqhDnYX0ubz+DopVt5bK8unha7jtZjcWbQspc4DqD95a5linqpLp0YuYZdeFLX7PfjTdUa8u7CZIpozBsU7n2Da2TnB4NOOKkmnOOjQXHTeFLBBqcEWtGW+02B5biH7MQpK9QrZ5weCaI4mbcbx0ewXFt/CFvd2Dtb388tylqblmW1JthgZKebn739aTxcovVaXtvqFxLDwnZXE2nWV3rBt9Rvo1ljhFvvVUYZAZsj5iOcUSxU/elGN4oLiaf4QjntLWa8vLiJrx2W3EFm0ygBtu
6Qj7oJpVMXZvlW3mFzKttOntfFUGmzeWJ471YWLLvTO8DOO49u9dDmp0XNdh3Nm58O6egub/U9WNsrajNaiOC0zllbqBngd8dqwjXnpCEb6X3Fcw9U0afTdfm0jcJZklESkcbycbfpnIrohVUqXtB3Ni48LafEmpxRayZb7TIGlni+zEKxXGQjZ5wTg8VhDEzbi3H3W7CuTp4FZttmbqf+1Xg84RC0Ywg7d2wy9N2PwzxUPGWd7aX7hcis/CdhONKhm1h4r3U4BJBCLbcFJzwzZ4HGKqWKneTjHReYXM6Tw+Y49FZrjnUpXjI2f6orIE9eeue1a+3vzWWyuO5sp4WadbXSjdRKjavPaeaLcb8omdxOeQcfd7etYfWGnKpb7KFcov4Xtrq1SXR9SN7ILxLORXgMQDv91lOTla0WJaf7yNtLhcfqXhKO1069uLW8uJpLDH2hZrRokYZwWjY/eAP+NKGK5pJNaPzuFzN0fR4b63vL29uza2NptEjrHvdmY4VVX14rarVcJRjFXbGzo7zTLaPT4xYzW80SaDJMZmthmUeb1xn5X5xnnGDXHGpLmfMnfm7kpmfq3hO30qyYyX8wu1hWUb7YiCbIB2xyZ5bn8a2p4pykly6f10Hcjv8Aw1p9gtzaS6wF1a2h814Gi2xE4B8tXzy2D6c044mcrS5fdegXGSeFwmv6jpf2skWdo9z5nl/f2oHxjPHXFNYlump262C5Nd+FLey0iO4uNQmjuJLUXKE2x+ztkZ8sSD+L8MZqFipSnZLr31+4Lhf+FLfT9KE0+oTJctai4UtbH7O+RnYsg/i/DrTjinKei0v31+4Lk1/oEb3k9zf3kdvZWtpbGR7e2AZmdflVUzyeDk5qIYhqKjFXbb6hcZF4QtppmlXVtumtYtex3TQHO1WCsrLngg+lU8U1o4+9ewXMzWdHt7CzsL6xvHurO8D7Gki8t1ZDhgRk1tSquTcJKzQIxq3KCgQUAFABQAUAFABQAEZBHrQB2SeLdLFyuoyaZdNqX2P7IzCdRGBs27gMZzj1964XhqluVNWvfzFZjNL8V6fplnEYrW+S5S38mS2jnAtZm2kb2U5OTnJA7054acna6tf5oGh9v4yt/s1m91HqLXdpAIRFDdlLebaMKXUcjtnHXFS8JJXSas/LULGRNrsc9no8EtmJRYSSPKshykweTeRjqPSt1RacrPcLF/VfEtncaFPpdkmouk8qyf6dMJFtwpztj7+2T2rOnh5KanKy9OoWINC8QW2n6PdabdJfIs0yzCWxmETtgY2MT/DVVqEpz51+INXL0/inSbvU3upLK/tmkgiQTW1wBLCyDGEY9VIxnPORWaw1SKtddd1owszF8SayuuaoLpIpERIUiBlYNI4UfecjqxrooUnSja9xrQ3tbv7jT/BOm6VcKiahKB5hVwzC3Ri0YbHu2ce1c1GnGdaUun6k2uyB/FenG+k1pNPuBrckJjJMq+QHKbTIBjOcdqpYapyqDa5b/Mdh2neLNLtZdKvbjTLmXUNOt1tkKTqsbKAQGIIzuwfpSnhalpQi9G7hYwNE1Z9F1q31KNA5iYkoTjcpBBGe3BPNdNWl7SnyMfQ3ZfFdnE9p9lj1OdY7uO5ka9u/MbCHOxOwHuea5lhpNO7Wz2RNijaeIvs8ustHEVk1GZJI2ZhiIiXzPm9fwrWeHbUU38K/QdjrL+G3sLbxFqU1h9nlvbV0Fx9tSWKZ3I4hUDcQTyc9MVxQlKUoQvs+35iM7/hONOe5W4ltNSYvbm3kt1ugIIlKbSY0x1+vvWzwdS3Ldd/NhY09FS3kutH1u7sgy21qoa+S8UQoqAgF0I3eYBxgcZrCo5LmpQeje3UDAsfFtqljawXqanmzZ/KFndeUkyFtwWQfpkdq6ZYV3bjbXvuh2MGLVNviKPVpIs7boXBjVvRs7QT+XNdLpv2fJfpYdi3q+vpqVn5C27xn+0JrzJYHiT+H6j1rOnQcJXv0sCRHq2s/2n4ok1eFPs5eaORFkOdpXaOSO3GaunS5aXs35glodnqMFvZWniPUZrD7NLfWzILj7YksUruQcQgDOD1JPTFefByk4Qvon/VyTIk8awTL9rmi1Fr/AMkRGJbsi1Zgu0OVHOe+Oma6Pqck+VWte/mOxlxeIkj1XQbw2zkaXBHEy7xmQqWOR6ferX2D5Jq/xBYtWniXSxbaf/aGnXM02nTyS2/lTBVYM+/D5HY+lZyw9S75Xo1qFiWHxnFFfW9x9ikIi1Oa/wAeYOQ6kbenUZ603hG01fpb7gsZmk+Im0mwlihhLTm9iu0cn5Rsz8pHvmrq0HOV79LBYvat4ntLywu4rWPUjLeHLi7uzJHAM5IjA656ZPQVnTw04yV7WXYLGfo2rWlrZX2najbyzWV3sZjA4WSN0OQwzwep4NbVqUpSU4OzXcbRfuPFNkYmgtNPmigGlvp6K8oYjL7t5OOfce9YrDTveT1vcVidvFlhDpl3FY219FJdQeSbVpw1rESOXReue4HY0fVJuS5mv1CxW1HxDpN/9rvjpUh1a7h8t2kkDQxtgAyIuM7uO/SqhQqRtDm0XbcLMtyeLdKee81BdLuhqN7ZtbSt56+WmUC7lGM84HWs1hqllG6snfzCzGWviuwstPdba2vo5pLYwNaCcG0LFdpfaec98etVLDTlLVq1736hYSHxVp9pps0drbX0cs1qbdrTzwbQMV2lwp5z3x60vqtRyu2t736hYjfxRY3rXNvf2VwbG4gt4z5UgEkckQwHGeDnng01hpK0oyV7vfzCw2XxTbiGe0trKSOy/s57C3VpAWXcwYuxxySR0FUsPK6k3re/3BYprrNlLpukWF7ZTSwWLTtII5Qhk38jB7YOPrVypT5pyi97AYZroKCgQUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAYoAKACgAoAKACgBMD0FAC0AGB6CgAoAKACgAoAMD0FABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUDOr8P+ANX16JbnC2lowysswOXH+yo5P14rjrY2nTdlqyXI6yP4Q2u395q9wW77YVA/XNcrzGd/hFzDv8AhUVj/wBBa7/79pR/aM+yDmD/AIVFY/8AQWu/+/aUf2jPsg5g/wCFRWP/AEFrv/v2lH9oz7IOYP8AhUVj/wBBa7/79pR/aM+yDmD/AIVFY/8AQWu/+/aUf2jPsg5g/wCFRWP/AEFrv/v2lH9oz7IOYRvhDZ4+XVroH3iU0f2jP+UOY5vXPhrq+lQvcWrpfwLy3lqVkUeu3v8Aga6KWOpzdpaMdzjDXcMSgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoGdj8P
PDUevay892m6zswGZT0dz91T7cEmuLG13TjaO7Jbse3qoUADoK8UgXpQA0OrdGB+hoAdmi6AM0AIGBGQQRQAuaADNABQAhGaGB5H8TvDMVjPHrNogSO4fZOo6CTqGH1wc+/1r1cBXbXs5FJnndekUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHsnwnRB4YuHGN7XbbvwVcV42YN+1XoTLc72uEko61/yAtQ/69pP/QTV0/jj6geU6Ratb/8ACKXC6ZJYNPPGDqC3Bf7Rx90oDxu969Kbv7RXvbp2KZt6Z4z125vYbt7UyafPLKhiWAKI1XOCsm7LHjkYrCeHppON9dAsO03xLrlzNoctzeWUltq3mkwRxYaJVU/LnPPbmnKhTSlZaxtqKxRttf1ay8M6KunIkMDW0ssrW9uJmQhyBmMtkJ6mqdGDqSUtdvIaRa/t7UF1ttYW8inhXQ/tZhjRhG+DjAycj5uc4zjj3qVSg6fJaz5rXuFtBsfi7xHBpl7PcRhh9g+1QzPaiMI2RwBuO5SDwaboUnJKPezFY7rQv7RbTI5NTmhluJf3n7lNqqpAIX3x61xVOXmaiLqadQBy3xERH8D6hvx8oRlz67xiunB39tGw1ueD17xYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAei/CrW47W+udJncKLnEkJJ43gYK/Uj+VebmFK6U10FJHrea8ogZNFHPC8Mqho5FKsp7g8EUXs7oCidC01rSztTaJ5NkyvbJz+6ZehHPaq9pJNtPcCCPwxo1vqLajBp8Md6xZhKFyVY9WA6A/hTdabjyt6AYOk+BHs9bgv7mayIt2dh9mtfKaYsCMvzgYB6KAK6KmKUouKT17sdzbm8IaDcW1vBJpsXl2ylYgCylVJyRkHOM9qwVeom2mIsHw9pJntpvsEO+2iMMR24CpgjbjoRyevrUqrNJq+4FeDwfoFtDcww6XAqXKbJRg/Muc7c54HsKp16krNvYDajjWKNUQYVQFUegFZgOzQB5v8VdcjjsIdGjcGaVhLMAfuoOgP1P8AKvQwFJuXP2KieTV65YUCCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAHRu0UiyIxV1IZWBwQR3FJq6swPTvDvxTVIUt9dicsowLqFc7v8AeX19x+VeXWy/W9LYlxOsj8feGJFDf2vCvs6sp/UVyPC1k/hFZj/+E68Mf9Bm2/X/AApfVqv8rFZh/wAJ14Y/6DNt+v8AhR9Wq/ysLMP+E68Mf9Bm2/X/AAo+rVf5WFmH/CdeGP8AoM236/4UfVqv8rCzD/hOvDH/AEGbb9f8KPq1X+VhZh/wnXhj/oM236/4UfVqv8rCzGt488MKpP8AbEB9lDE/yp/Vaz+yOzOc134qWcULRaNC88xyBNKpVF98dT+ldFLL5N3qaIaj3PK7u7nvruW6upWlnlbc7t1Jr1owUFyrYohqgCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAzQAZoAM0AGaADNABmgAzQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHQJ4L1+SNZEscq4DKfNTkH8a5vrVMnniUdS0LU9IVWvrVokY4DZDDPpkd60hWpzdluNNPYza1GFABQAUAFABQAU7AFIYUCCgAoA3LXwjrd5axXMFlvhlUMjeaoyD9TXO8TTTsTzxRBqPhzVtKg868s2jizgsGDAfXB4qo4iEpcq3GpJ7GVWwwo7DCgQUDswoEFABQAUAaum+HNV1e3a4sbXzYlbYW3qOfxNYzrwg7SJcknZk9z4Q120t2mlsG2KMna6scfQHNT9ap7BzJmHXQUFABQAUPQAo3AKACjYAoGFG4goAKACgAoAKACgAoAKACgAoAKACgAPQ/SgD3nTb23g0qASQhiYUyzNgfdH5V4M0927HI3FXM/V7iyXSLpr2RRavHhtzfKxwcfU59K0jDnacdWEJPofPZlia8uxcXF4pWUhREWwBgegr7XkkqcPZxjqutik7yd2XZLp7ZESKIugjDeZNLtz7ZPU1x06Eaz55OzvayRrKbigGomXyVtofMklj83DPtCr05NL6koczqSsk7d7vyD2rlZJCHUmPlokH751LMkrhAozjqaawSs5t6X6a38/ITqvaxC1/JNc2jW8ZYssitEXwAwx1PtW31WNKM41HtbXfRk+0k2rE41IlNn2c/afN8ryt3fGc59MVi8Gk783upXv8A8Ar2r7aiHUjGsiywFbhGVRGrZ3FumD6UlglJpxknF3d9rWBVbXvuRXl7KLS6ikjME6Rh1KvkEbgMg1th8NB1ITi7xbttboKc5WaejLlvdi6kcxJ+4U7RLn7x74Hp71yV6HsopSfvPWxcJ87dtkWK5iwFNbge2eFLuG38M6f5kQc/Z15J4Arw60G5O7OaUlGTuTXVzafZppZ5FW1KkSEsNuz0PrSUOe3K7kRnfY+fNTt0XUIjBcXKxT3LDAlOAvJGPTtX1+DquVKXPFNxXY1lHVaiPfLYQ3CFJJDAygb33M+7nOcfX8qmOFeJlGa0Ur9NrD9pyXQtzfKyuFD7F8pi6Pg5Y8D8qKOEaacnrrv5BKpoyvNe3qxXpCgeXOFB3j5Rxx05/wDr1vTwuHlOmm91cl1JWbLUuoukkiJArGEAy5lAwcZwM9TXNTwSnFScrc22n9WNJVWna2w5dQaW5SKCAyK0ayFy2AFNS8HGMHObtZ2t5gqrcrJF2uK5qwoEeo/DaeOHRJmkj3/6Q2BnHYV5eLi3NpHPVaUtTqpbuOWcyQnyypz8j8qcVzKKkuVu5lzrdHh/i+e2bxqPsDqbZw5YRn5WYKM/rmvo8DS/2OfMtdPzN7vmjcw4NTklW3ke1KQzsEVt4Jyfb0rsqYGMXNKd3FXtYaqtpNofBqL3EnyW4aPeUyJAWBHcr2FRUwapw5nLWye2mvmCqtvYitb26Nq7vDvfzmRfnGAMnqccAetaVcLR9qoQlZWvtf8Aq4ozlYeuqDyZGaLMqSCIIjhg
zHpg1m8D76V9Gr6q2w/a+6D6nJCLgTWpR4YxIQHyGBOODin9RhLlcJ3UnbYPatX5kPa8uAiE2gVmyfnlAVR2yfU+lSsLT5pWldLsrt/IfPKy0GDUy8Nu0UBd5nZAu8cEe/pVfUbTleWkddv61F7W6Wgz+1ZQju9oVSKTy5T5gODnHHr1FU8BTeinq1daB7VroaZrzTYKBBQAUAFABQAUAFABQAUAFABQAHoaAOnvfEyXiRxnzBFEiqqY4JAxk18xissxleVrpR9TzquFqVG9UUbTU7ZrkPqKyS26bvLgHKqxHDY6E16NHBVMNBUqVmnu76nTCk6SSh82cnbXS28lyxiuj5spcYgbjgCvqa1GVaEbSWiS3HGXI3dFW5cy3jTLBKwdAv721ZjHjutdNGMY0lBySs76Na+pMm3K9hsLy2wheKObzUj8pg1s+1lzkH61dRQqtqTVm7q0lp3+8mN0k0tQckvHN5U08oTY/wBotWIbnOR6Yz+VKCSTgmorpaQ33FDSRfZ3hSbzIg+4G0YK27HGB0FCUJc0ZtWdvtBdqzSF3MMTBLj7UJTKSbZtpyMbfXGKVo29m2uS1t1f1Hrut7iMzS+ZNIlwLkujoVtm2rt6D36mmvctCMly2a1avqJ6ttrUJWe6Sdp45xLJGI1CWz7VGc/WiEY0uWNNqyd3drVg25XbLliVW9lEMc0cEg3FHhKhWHoenPpXJi7umnUacl1v0/4BdPSVkaVeYbhRa4HSN4jV9MtLHMixwRBGAH3iO9fO4/L8XiJvla5ThrYerOWj0KdvqcD3kf24SPYo4Y2ynh8dzXThsBVwkFGlZye7/wAjSnQdJe7ucxqN1HPfJIkNyFiuGfAt25HPTFfVYWi4UpKTjeS7lzldryKszxTahFcmG72quGT7O3zHnH5ZNdFKM4UXTbjfvcUneXNYhjRY7BrfZdM7SK2427dFIwPyFayblWVRuOz0uTa0eUdM5kF4qx3AWdxImbZ8hhjg+3FKEVFwbafLdb9wet0NlJaaWRbZmabBYyWbNsbGCV/wNVFR5FGUvh2tJa+oO9723LdrKiXgYRXOGjSIZtyuCD1PYda5cRBzpWbW7e9zSLtI1K8robBQBvaZr/8AZ+jPYqXVpJS7Mo7YAx+lePmWFxNbSjpc5cRSnN+4VZNU3uUR5IoWGJCnDOPT6Vz4PK54WPtNJT/AijhXT9/qY/iC6s5tehnsraeO2ii2hFhLclQDyPcGvrMvhU+rSjUaTlbr2NdbpvcyFdVs7ODyrrMEisx+ztzjPT867nG9WdS695d0K/upW2I8s9zG8kMp8uTf5y2rCRhnoe1a2ioPlktVazat6hfVXQj7jHs8mV1WdpVR7Z8MD2b6U1yXu2tY20auvQl3sOVGEM8zbogJY5FP2dlCsOOn92pnNc0YrXRp63uv8xpOzBi939tkLiVWiWMNDGxUHdnAHU+/1oXLRVOO2rer6WDWV2SXcvnXMUyW0r7E27JrZio9x71FCEYRcXK13e6a+70HJ8z0GWxMJt90dwwhkdxi2YZDD9OtVXSqKVpK8klugjpuOkYPa3UXlXOZpvMB+ztwMg4/SpjHlqRnzL3Y23Q2/da7s2wdwDYIzzgjmvGludAVIBQAUAFABQAUAFABQAUAFABQAUAFABQAtHqMSjQLhRoFwo0C4UaBcKNAuFGgXCiyC4UAFAgoAKACgYUaBcKNAuFGgXCjQLhRoFwosguFAgoAKACgYUWQBRoFwo0C4UaBcWjYAouxCUWQ7hRoFxaLILiUCCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgDQsdHuL63e5Ettb2yOIzNcyiNS5Gdo9Tj8qznVjFpdQFvdD1DT4y9xbkBZXibad2GUAnOO2GGD0OaUa8JbMLle1sLm8uLeGKJt1xII4iw2qzE4HJ4q5TjG9+gC3GnXVtKkbxFnaJZgIxu+Q9CcdKmFWMldMLkHlSeX5nlv5f9/adv59Krmje1wFMEy7cwyDdjblD82emPWmpRfUCW0sbi81CKxiTFxK+xVk+Xn3z0qZVIxjzPYCF4ZY874pEwATuQjAPTrVc0ejC41wYzh1Kn0IwaYm0kIDn2+tAJphQO6DIzigXMgoHcCcUCbSEJAIHc0BdXsAIOfagFJO4tA7oMg96BXQUDujRsdFub62+0CW1t4DJ5SyXMwjDvjO1c9TyPYVlKtGLtq/QLla4sbq1nnhmgkV7dykvy5CH3I4q1OLSaYDk0+5ksZrwRkQRFAzNxncSBj15B6UnUipct9QITBMJDGYZfMAyU2Hdj6daq8e4Fw6Nei/urLy1M9tG0kihs8KATj1OCOKj2sOVT6MCkYZVbaYnDbtuCpBz6fX2q7ruFxh4ODwfemK6AHNAKSYUDuISBjPegUpKO4uRz7UDugoFdBQO6A8Y96BNpBnr7UBdBQO6CgAoAKACgAoAKACgAoAKACgAoAKANq0uLC70JNNvLt7N4Ll545RCZFcMoDKQOQRtGOxzWEozjU9pBXurC6mra+I7CxNlb2Ut3DZRXk0ksbEsXjaNVXdj72SG47ZrCVCcrtpXsgsXbXxHo9vZWcRupmELWcgVo5GZfKI3Dk7R3xtA46nNZyw9Vtu3f8Qsxth4o0yJVXzWgdRbMZjHJ8wjDAp8jAnk5GflPOaJYWpf7wsVk8U2rMsTGU2hspYja7cRmVpi4GM4AxjntVvDSSv1vv5WFY3L3UU0h1l1K7uJfOvrh4hMhzArRFVKgNkqCQMqQP7tYQhKpdRXRfPUNTm5ddsz4v0u/MheCzEaySpG2X25yQGJY4zgFjniuqNCaoyh1YzU07UrW/ePT7m7uNQso7aZ767kUqVXeJEHzHPBXH1cgVjUpyh7yVnpZfmHQ4nUr2TUdQuL2Y/vJ5TI3tk5x+A4/Cu+nBQioroTPZFU7SevY1ZDt0E446DpxQJWE47Y70Cdugoxnnpmgat1D/634UCuKxBOQeg4oKm03dDePXvQRYXjHXnigpWsJxzwD1oEWIEhZZjJKUZUzGAm7e2RwT24yc+1S79DSnY2befTr7RbWxvrySzezmkdWWAyCRHwSOOjAjjPHNYyjUjNzgr3RfU1bXXtKt4IvInuYLe3+0qbFlLfahICELMOM9Ac9McVjOjUbd0m3bXsFi5F4r0yGczyXVxPDJPbSpZmI7bURrggZODg8jHXHrWbw1R6Jd9e4rMhuPEVlLDJapqUkExtwi6hFFKSMSbymWYuQR3z146VUcPNatXV9h6lFNctD4u1TUBdTww3UMscVwsZLqzKAG2jnqDWroy9jGNrtdA6Gtb63BJb3d2zSXMOmwwPBdSDb5t2qlAcHnncDzziME1zuk00tr307IDz+Q5B3MSx5JPc16drEztYYSM/j1oM9NhPQH0xQF728h5KnHpQVJxdhv455oMxOMc4zxQNWtqBx+HNADiVO3npQW2
nYTj14z0oIa3sxOMHnnigelixCsJt5meYrKpXy49mQ+Tzz2wPzpNyvpsaU/hGUywoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoslsMKBBRZdQCgYUAFABQAUAFABQAUAFABQAUAFAhaYCUgCgAoAKLIAoGFABQAUAFABQAUAFABQAUAFABQIKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD/2Q==", - }, - ], - tool_failed: false, - }, + tool_call_id: "call_035coU8EfPMCt5kyzdjGP1Me", + content: [ + { + m_type: "text", + m_content: + "Start new chrome process.\nNo opened tabs.\nopened a new tab: tab_id `1` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `1` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `1` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nopened a new tab: tab_id `2` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `2` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `2` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\n test tripple ticks \n```\nstuff\n```\n might escape", + }, + { + m_type: "image/jpeg", + m_content: + 
"/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAGYAyADAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs2VjcahMYrZAzKpdizBVVR1JY8AfWonUUFdgaP9jWEH/H7r9mrd0tUe4YfiAF/Ws/bTfwxfz0FcBbeG/unU9Tz/e+xJj8t+afNX/lX3/8AAHqRz6TbvaT3Onail2kCh5Y2haKRVJA3YOQRkjODxmhVZcyU1a4XK2laVda1qMdjZKjTyAlQ7bRwMnmrq1I0o80tgbsS61od94fvVtL9I1lZBINj7htJI6/gamjWjVjzRBO5dTwdrEmg/wBtLFD9i8ozZ80bto9qzeKpqp7PqF1sYGQO4rpAKACgAoA7i18C20/gU6+b2YT/AGd5hEFG35SePXtXBLFyVf2VtLk31scPXeUafh/TE1nXrPTpJWjSd9pdQCQME8Z+lZVqjp03NdAehva/4Mt9I8T6TpUV3K8d8VDO6jKZfbxiuajipTpSm1sJPQj8b+EbfwqbL7PdTTi4358xQNu3HTH1qsJiZVr8y2BO5yXeuwYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGvpnGga63cxwL+Blyf5CsJ/xYfP8g6irPov/AAjgiaCQ6n5uS4U9N3Zs4xtyMYznnNFqvtb390Nbl6S48LNrkbRW0i2AgKkOj7fMzwSobccLwcEZPOMVly4jk1eotStYmAReI5rZXW1+yskQc5YK0qBQffFaTv7ilvf9AL/w4/5Hmy/3Jf8A0A1nj/4DG9juvGPgW78TaxFewXsECpAIiroxOQSc8fWuDDYpUY8trkJ2Lt7pj6N8MbrTpZFke3sXQuoIB6+tRCftMQpd2G7MnwHa28nw+uXeCJm3T/MyAnp61ri5NYjR9hvc4/4aRRzeL4FlRXX7PIcMMjOBXZj21R0HLY2/EXh+LWfijDpyqIYGt0kmMahflAOce54Fc9Cs6eGcuok7I6DVfEXhnwdImjrpu/5QZI4YlIVT/eLdSaxp0a2I9+4JNl6+fT5PhzevpQVbF7KRolUYCg5JGO2DnjtWcFNYhKe90T1OW8A+G9Ni0STxHq0ccije0YkGVjRerY7nIP5V1YyvNz9lAqT6G1pPi7w54j1y2tlsnhuonLWkskarkgHIBB44zwetY1MNWpQbvp1E00UPG3/JRPDH++n/AKNFa4b/AHeoC2E+KVpJf3/h+zh/1k8kka59SUFLAyUYzk+lv1GjbGm2ng3TYY9L0GfU7l+HeNFLH1ZmPT2ArBzlXk3OVkTuZfijwzZ654al1iDTX07UYozK0boEZtvVWA4PGcGtcPiJU6ig3dDTszyGvZLCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKALthqTWC3CGCG4gnQLLFLnDYOQcgggg+9Z1KfPZ3s0BualpOm6bNLfXNu4tXSMW1okpBkkMas53HJCLu+pJA9a54Vak1yJ663fzFczhqOjKP+RfDH/avpD/SteSr/AD/gGpHd6uk1k9nZ6db2MMjq8vls7tIV+6CWJ4GegpxotS55O7HY2Phv/wAjxZf7kv8A6Aayx38FilsbvxK1nU9O8SQQ2WoXNvGbVWKRSFQTubniufA0YTptyV9RRWh0EdxNd/CJ57iV5Zn09yzucsx56mublUcVZdxdSn8MLq3vPDN3pZfE0c
jllzzscdR+oq8fFxqqQ5bknhTwI3hjXDf3WoRSLtMNuqgqWLeue+B0FLEYv20OVL1E3cqatq0Gj/FyGe5YJBJaJC7nou7OCfbIFXTpueEaW9xpXRN4u+H91r+t/wBp2F3AgmVRIsueCBjIIBzxjilhsYqUOSS2BSsbF1pUeifDe906KXzRBZygv/ebkt9OSeKxjUdTEKb6tCvdmP4FuLTX/A8/h+SXZNGjxMB97YxJDAd8E/pW2LjKlXVRbDejuQeGvhxc6Rr0GoX99btFbvuiWLOXboM5HH05p18cqlNxitwcrj/G3/JRPDH++n/o0U8N/u9QS2JPiTfHTNZ8N323d9nmkkK+oBTI/KpwMOeFSPf/AIII39Vn1nVNOtb7wrf2hjcEsJkyHB6YPYjuDXPTVOEnGsmCt1Oa8UT+LtJ8Mm4vdVsH84mGaKOAAhWGPlJ6n144rpw6oVKtoxeg1a55T0r1ygoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs3V9dXxiN1cSTeUgjj3nO1R0AqYU4w2QFaqAKAJ7S7ubC5W4tJ5IJlztkjbBGevNTKKkrSV0A+91C81KYTXtzLcShdoeVtxA9P1ohCMFaKsBMuuaqun/YF1C5Fnt2eQJDs2+mPSo9jT5ua2oWK1reXNhcLcWk8kEy/deNsEVcoxkrSVwLlz4h1m8nhmuNTupJIG3RMX+4fUY6H3rONClFNKO4WRUvL261C4NxeXEk8xABeRsnA6CtIwjBWirAXLXxJrVja/ZbXVLqKDGAiycAe3p+FRKhTk7uKuFkQprWpx2L2Sahci1fO6ESHac8nI96HRpuXNbULFa3uJrSdZ7eaSGVDlXjYqw/EVpKKkrNAX7rxHrV60DXOp3UjQMHiJfG1h3GO/vWUcPSje0dwsiC51fUby6iurm+uJbiHHlyO5LJg5GD25q40oRTilowsJf6rqGqFDf3s9yY87PNfdtz1xRClCHwqwWHafrGpaUW+wX09tu+8I3wD9R0pTown8SuFhl/ql/qkolv7ya5deAZWzj6DoKcKUYK0VYLFSrAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAKmoahFp8IeTJY8Kg6k1jWrKmrsyqVVFHPP4jvWfKCJF/u7c1wPF1HscjrSY3/hIr/+9F/37pfWqvcPbSD/AISK/wD70X/fuj61V7h7aQf8JFf/AN6L/v3R9aq9w9tIP+Ehv/70X/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXu
HtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSHJ4ivVcFhEw9NmKaxdRAq0kb+nalFqERZMq6/eQ9v/rV3Ua6qLzOqlVUi7W5sFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZyHiCVn1V0PSNVUD8M/1rycVJuozz6zvMy65zEKACgAoA1dC8Nax4lnkh0ixe6eJd0hBCqgPTJJAGaTdilFvY3v+FUeNf8AoDf+TMX/AMVRzIr2cuwf8Ko8a/8AQG/8mYv/AIqjmQezl2D/AIVR41/6A3/kzF/8VRzIPZy7B/wqjxr/ANAb/wAmYv8A4qjmQezl2D/hVHjX/oDf+TMX/wAVRzIPZy7B/wAKo8a/9Ab/AMmYv/iqOZB7OXYP+FUeNf8AoDf+TMX/AMVRzIPZy7B/wqjxr/0Bv/JmL/4qjmQezl2D/hVHjX/oDf8AkzF/8VRzIPZy7B/wqjxr/wBAb/yZi/8AiqOZB7OXYP8AhVHjX/oDf+TMX/xVHMg9nLsH/CqPGv8A0Bv/ACZi/wDiqOZB7OXYP+FUeNf+gN/5Mxf/ABVHMg9nLsVr/wCG3i7TbGa8utHkEEKl5GSVHKqOpwrE4pcyB05LocrVGYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADo43lkWONGeRyAqqMkn0A70AaX/AAjeu/8AQF1H/wABX/wpXRfJLsH/AAjeu/8AQF1H/wABX/woug5JdjNkikhlaKVGSRDtZWGCD6EdqZA2gAoAKANHQ5THq0QB4fKn8q2w8mqiNaTtJHZDpXsHoLYKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZxuu/8hib/gP/AKCK8fEfxWedV+NmdWJkFABQAUAe3fAb/kGa36+fF/6C1ZyOilsevVJqFABQAUAFABQAUAFABQAUAFABQAUAVdR/5Bd5/wBe8n/oBoB7Hx4Puj6Vscb3FoEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAFrTJPJ1S0kN41kFmU/alUkw8/fAHXHWk9io7np/9v2//AEVy9/8AANqzOn5h/b9v/wBFcvf/AADagPmeZatKJtXvJRfNfh5mP2t1Kmbn75B6ZrRbHNLcp0yQoAKALukf8he2/wB/+hrWh/ERpD4kdsOleyeitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUABoBnG67/yGJv+A/8AoIrx8R/FZ51X42Z1YmQUAFABQB2PgTx/ceCXvFWyS8t7raWjMmwqy5wQcHsemKlq5pCfKdr/AML6/wCpc/8AJ3/7ClyGntvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIqap8cri80y5tbXQ0t5po2jEr3O8JkYJxtGTzRyCdW62PJOgx6VZgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBNZ3T2V5BdRrG7wyCRVkQMpIOeQeo9qRSdnc7H/haOsf9A3Qv/Bev+NTyIv2j7B/wtHWP+gboX/gvX/GjkQe0fY4++u3v76e7lSJJJ5DIyxIEQE+gHQVSIbu7kFMkKACgC7pH/IXtv9/+hrWh/ERpD4kdsK9k9GOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAA0CZy2sadfT6nLJDZXUkbbcOkDMDwOhArx8R/FZwVIvmZR/snUv+gbe/wDgM/8AhWFyOVif2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2
TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oHXv/AIDP/hRcOVh/ZOpf9A69/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WL/ZOpf9A29/8AAZ/8KLhyst6Zpt/DqUEktjdIitks8DqBx3JFbUH+8RdOL5kdYOleyd62CgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM94+Hoz4F03k9H7/wC21eBjP40jNo6bZ7n865xWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86Asc/44XHgnVuT/qD39xW2G/jRBI8B9a+hNUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/WvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/AFr6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/wD0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/AJEnVv8Ar3P8xW2G/jRA+f8A1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/wAiNpv0f/0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/ADFbYb+NED5/9a+hNEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFACE4GT0oAyrnXIISViBlYdxwPzrgq4+EXaOp108HOWr0M59fuyflEa/8AAc1yvH1XtY6o4Gn1uCeILpT86RuPpirjjavVJg8BTezaNKz1u2uWCPmKQ9A3Q/jXZSxUJ6PRnJVwdSmrrVGrXUcoUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/MVthv40QPn/1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAATigDl9V1RrmQwxNiEdx/Ef8K8bFYlzfJHb8z1cLhlFc0tzLLVyKJ3JDC1UojSGlqtRKsMLVoolJG7oesMJFtLhsqeI2PY+hrvw9V/DI8vG4RJe0h8zp67DywoAKACgCpdyOhUKxGc9KaIZW8+X/no3507IV2Hny/89G/OnZBdh58v/PRvzosguw8+X/no350WQXYefL/z0b86LILsPPl/56N+dFkF2J58v/PRvzosguySCaQzIC5IJ6VLRSZo0igoAKACgAoAKAC
gAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAMzW7o21gQpw0h2D6d65cVPlp2XU6MJS56mvQ5MtXkqJ7qQwtVKJVhparUR2GFqtRKSGlqtRKsM34xg8+taKI+W532k3f27TYZj94jDfUcGu+DvG58xiaXsqrgXqoxCgAoAilgSXG7PHpQnYTRH9ji/wBr86d2FkH2OL/a/Oi7CyD7HF/tfnRdhZB9ji/2vzouwsg+xxf7X50XYWQfY4v9r86LsLIPscX+1+dK7CyHJaxowYZyPegLE9AwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAGPKkeN7qufU4oAcCCMjpQAMwUEkgAdSTQAiSJIMo6sPVTmgB1ADBNGX2B1Lf3QwzQA+gBjzRx43uq56bmAoAeDkZFACMwQZYgAdSTQAiSJIMoysPUHNADqACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAOb8TuQ9svbDH+VcOM1aR6mWrST9Dni1caieqkMLVaiUkM3VaiOw0tVqJVhharUSkhC1WojSOv8IyFtOmU9Fl4/ECuiCsjwc1jasn5HRVZ5gUAFAEE9x5O35c596aVxN2Ift//AEz/AFo5Rcwfb/8Apn+tHKLmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt4/55/rRyj5g+3j/AJ5/rRyhzB9vH/PP9aOUOYngn84MduMe9DVhp3JqQwoAbI+yNmxnAJxQB55c3Ml5O00zFmY9+3sK6ErHM3c2vDF5KLl7UsWiKFgP7pFZ1Fpcum9bEXiS7lkvzbbiIowPl7EkZzTprS4TetjNsbuSyukliJHIyo/iHpVtXRKdmdV4iu5bXT1WIlWlbaWHUDGaxgrs1m7I44EqwYHDDnI61uYHaaZfSS6ILiT5pEVsn+9trCS96xvF+7c42eeS6laaZi7tySf6VslYxbudB4Xu5WlltWYmMLvXP8PP/wBes6i6mlN9Cn4iu5JtReAkiKLAC9icZzVQWlxTetinpl3LZ30TxEgMwDL2YE05K6FF2Z39YG4UAFABQAUAFABQAUAFAHPeOf8AkSdW/wCvc/zFbYb+NED5/wDWvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/ACI2m/R//Q2rwMZ/GkZnUVzgFABQAUAc54qiPk28w6KxU/j/APqrmxMbpM9LLJe/KJy5auVRPbsMLVaiOw0tVqJVhharUSrDS1WojsMLVoolJHc+E4TFo3mH/lrIzD6dP6VaVj5rNJ82IsuiN+g88KACgCGaWOPG8Zz04zQkJsi+02/9z/x2nZiug+02/wDc/wDHadmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7Sswug+02/9z/x2lZjuiwEQj7q/lQMXy0/ur+VAChQvQAfSgBaACgAIzQBy154YlM7NaSJ5bHIVzjbWiqdzJ0+xp6Pow04NJI4eZxgkDhR6CplK5UY2I9Z0X7e4mhdUmAwd3RhRGdtAlG5S0/w40dwst3IhVDkIhzk+59KqVTTQmMO5t6hZRahaNA7Y5yrD+E+tRF2dzRq6sc4vhi6MuGmhCZ+8Mk/lWntEZcjOmtraG1tEt0x5ajHPf1zWTd3c0SSVjnbrwzL5xNrLGYieA5wV9vetVU7kOHY1tI0pNNRmZw8z/eYdAPQVEpcxUY2INY0T7dKLiCRUlxhg3Rv/r04ztowlG+qK2m+HmguVnupEIQ5VF5yfc05TurImMLO7Ok3D1rM1DcPWgA3D1oANw9aADcPWgA3D1oANw9aADcPWgA3D1oA57xyR/whOrf9cD/MVthv40QPAO5r6EtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKmoWi31lLbtxuHB9D2NTKPMrGlGo6VRTXQ88njkt5nhlXbIhwRXNyWPqqcozipR2ZCWqlE0sN3VaiVYaWq1EqwwtVqI0iews5dRvY7aLqx5P8AdHc1drIyxFaNCm5yPTreBLa3jgjGERQoHsKg+OnJzk5Pdk1AgoAKAIZhCQPNx7ZoVxOxFi0/2fzNPUWgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg5I7ZzhQpPsTRdjsh/2aH+4Pzouwsg+zQ/3BSuwsibpQMKACgAoAKACgAoArXt5DZWstxPIscUSF3djgKoGSTTSuS3Y8M1/483H2x4tB06FrdThZ7vdl/cICMD6nNaKn3OaVV9DF/4Xt4p/59NL/wC/T/8AxdPkQvayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/
79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayKup/GXxHqumXFhPa6cIp02MUjcEDOePm9qqHuSUl0D2sjk/+EkvP+ecH5H/Guv65U7Ift5B/wkl5/wA84PyP+NP65U7IPbyFXxLdgjdFCR6YI/rR9cqdkP28ja03VYtQBABSVRkoT29R6110cQqmnU3pVubQ0K6DcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgDF1rQk1NPMjIjuVGAx6MPQ/40nG524PGvDuz1icPeWlzYymO5iaM9s9D9D3oUT6OjWp1VzQdysWqlE6EhharUSrFqw0y81OUJbREjvIeFH41Tstznr4qlh1eb+XU77RtFh0i3Kr88z/6yQ9/Ye1Zylc+XxeLniZXeiWyNapOUKACgAoAimgWbGSRj0pp2E1ci+xR/wB5qXMLlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUPsUf8AeajmDlD7FH/eajmDlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUkht1hYsCSSMc027jSsTUhhQAUAFABQAUAFABQAUAea/Gy7ltvh9cpExXz54onx3UnJH6Crp7mFV6HzLWxyBQB2Vv8ACvxjc28c6aTtSRQyh50VsHpkE5FTzI09myT/AIVL40/6Bcf/AIFR/wCNPmQ/ZyD/AIVL40/6Bcf/AIFR/wCNHMg9nIP+FS+NP+gXH/4FR/40cyD2cg/4VL40/wCgXH/4FR/40cyD2cg/4VL40/6Bcf8A4FR/40cyD2cg/wCFS+NP+gVH/wCBUf8AjRzIPZyMLX/CmteGJIU1eyNv54JjYOrq2OoyCeRkcUJpkyi47mNTICgAoAKACgAoAKACgC/p+h6rq0bvp2m3d2kZCu0ERcKfQkUm0tylFvZFz/hDvE3/AEL+pf8AgM3+FLmXcfI+xnX+m32lziDULOe1mK7gk0ZQkeuD2pp3E01uVaZJc0lzHqtsR3fafoeK0ou1RWNIO0kduOle0eitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv8AyI2m/R//AENq8DGfxpGZ1Fc4BQAUAFABQAUARSxRzIUkRXU9QwyKBqTi7xdmZknhrSJTk2ag/wCyxX+Rp8zOqOYYiKspixeHNJgYMtlGWH98lv50+ZhPH4mas5/oaiIqKFUBVHQAYFScjbbux9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeXfHP/kQm/6/If61dPc56ux82Vscoq/eH1oGfZEZHlp/uj+VYnYP4oAOKADigA4oAOKADigDyH48f8gzRP8ArvL/AOgrVRMquyPEa0OcKACgAoAKACgAoAKAO18DxeZaXZ+z+KpcSLzor4Qcfx/7X9KiRtD5/I6n7Of+fH4k/wDf2p+4r7zg/GaeXrUY8nW4v3K8aw2Zup6f7P8AXNWtjOW/+ZztUZlrTf8AkJ23/XQVdL44+qLh8SO5Fe2j0o7BQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/wChtXgYz+NIzOornAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA8u+Of/ACITf9fkP9aunuc9XY+bK2OUKAOpg+I/jC3gjgi165EcahVBCsQBwOSM0uVGntJdyT/hZ3jT/oP3H/fCf/E0cqDnl3D/AIWd40/6D9x/3wn/AMTRyoOeXcP+FneNP+g/cf8AfCf/ABNHKg55dw/4Wd40/wCg/cf98J/8TRyoOeXcP+FneNP+g/cf98J/8TRyoOeXcP8AhZ3jT/oP3H/fCf8AxNHKg55dzH1rxJrHiKSJ9W1Ca7MIIjD4AXPXAAAoSsS5N7mVTJCgAoAKACgAoAKACgCza6lfWSstpe3NurHLCKVkBPqcGlYpNrYsf2/rP/QX1D/wJf8Axosh80u5Uubu5vZBJdXE08gGA0shc49MmgTbe5DTJLWm/wDITtv+ugq6Xxx9UXD4kdyK9s9KOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAOf8AFPhew8W6d/ZmomYQGRZcwvtbK9OcH1pp2M3FS0Zxn/Ch/Cf/AD01P/wJH/xNV7RkexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU
/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FFXU/gx4Z0jSrvUbeTUDPawvNHvnBXcoyMjb0rSjNupFeaGqSTuedivoDpQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgCnqGpWumWxuLqQIg4Hqx9AO5rSlRnVlywV2c+JxVLDw56rsjh9R8d3crFLGJYI+zONzn+gr2qWUxSvUd3+B8liuI6snaguVd3qzFfxHq7tk6hcZ9mxXasFQX2EeVLNcbJ3dRlq18YavbEZufOUdVlUHP4jmsqmW0J7K3odNDPMbSesuZeZ2GieLbTVWWCUfZ7o8BGOQ30P8AQ14+JwFSguZaxPp8vzqjinyS92Xbo/RnSVwntBQAUAFABQBG00aHDMAfSiwrjftEX98UWYXQfaIv+egoswug+0Rf89BRZhdB9oi/56CizC6D7RF/z0FFmF0H2iL/AJ6CizC6D7RF/fFFmF0PSVJCdjA49KBj6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBkeKP+RV1b/rzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P8A+htXgYz+NIzOornAKACgAoAgurmKztpLiZtscalmPoBThBzkox3ZnVqRpQc5PRHkOta1PrN81xKSsYyIo88IP8fWvrMLhY4eHKt+rPzvH42pi6rlLbouyM3dXXY4LBuosFg3UWCwocgggkEdCKVrjV07o9N8H+IDqto1rctm7gA+b++vr9fWvmcxwfsJ80fhf4M+6ybMXiafs6nxx/Fdzqa849wKACgAoAzboH7Q3B7VS2Ie5DhvQ0CDDehoAMN6GgAw3oaADDehoAMN6GgAw3oaALVkCJG47UmNF6kWFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8ALYfQ0hdSWmMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/8AoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/AMiNpv0f/wBDavAxn8aRmdRXOAUAFABQBxfxDv2t9JgtFOPtEhLf7q84/MivVyiipVnN9P1Pn+IK7hQjTX2n+CPNd1fTWPjLBuosFg3UWCwbqLBYN1Fgsavh3UDp+vWc4OFMgR/dW4P8/wBK48dRVWhJeX5HoZbWdDEwn52foz2mvkD9DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa7rGhdyAqjJJ7CgDEfxTaLLtWKVkz98Afyq/Zsz9ojTt7iK7CTQtuRgcGoatuUnctUFBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgDzj4mbhc6cT90pIB9civoMjtafyPluIk7036nBb69+x81YN1FhWDdRYLBuosFg30WHYlt2JuYQv3i6gfXIrKpZQdzSlFuordz34dK+FP0lC0DCgAoAqTJcGQlGO3thsUKxLuM8u7/vH/vqndCsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u6/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsy1EGEShzlu9JlIkoGFAGbrqSPpFwI8k4BIHpnmnHcmexw9dBzHUeFlkFvKzZ2M/y/lzWNTc2pnRVBqFAHOajrjrM0VswVVOC+Mkn2rzK2Jm5csNEelh8EpRUplS28RTwSjz282LPzZHI+lVRr1E/e1R0VMvhKPuaM6uN1kRXQ5VhkH2r0TxWmnZkcskyvhI9wx1oVhO4zzrj/njRZCuw864/540WQXYedcf88aLILslheR8+Ym3HSgaJaBhQAUAFABQAUAFABQAUAFABQAUAZHij/kVdW/685f8A0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo//AKG1eBjP40jM6iucAoAKACgDjviJppu9BW7jUl7R95x/cPB/ofwr1MnrKnX5X9r8zxs6w7q0Odbx/LqeTbq+usfG2E3UWCwbqLBYN1FgsG6iwWN7wfpx1PxLaptzFC3nSHsFXn9TgV5+ZVlRw8u70XzPSyvDutiYrotX8j22vjT7kKACgAoAqTSTrIQi/L2+XNCsS7jPOuv7p/75p6Cuw866/un/AL4p6Bdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXY6KS4aRQy/L3+XFJpDTZcpFBQAUAFABQAhGaAM19A06SXzDBgk5IDED8qfPInkRcjjWJkRFCoowABgCkJE9BYHpQB5vdl7e5lik4dGINeeqFmfU0EpwUo7MptNk4HJPAFdMKJ08lj0jTYnt9NtopPvpGob64rZK2h8lXmp1ZSjs2SSl9/HmYx/CRj9aZixmX/wCmv5rTJDL/APTX81oAMv8A9NfzWgAy/wD01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgAzJ/0
1/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgA/e/9NfzWkMciyMeWlX64oAkEbAg+Yx9jigCWgoKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAEcsSTRNHIoZHBVlPQg9RQm07olpSVnseK+LPDE/h69LIrPYSN+6l67f9lvcfrX2WXY+OJhZ/Et1+p8bmGXyw87r4Xt/kc5ur07Hm2E3UWCwu6iwWJLeGa7uEgt42kmkO1EQZJNRUnGnFyk7JGkKUpyUYq7Z7R4Q8NL4f0w+bhryfDTMOg9FHsK+LzDGPFVNPhW3+Z9jl+CWGp6/E9/8jpa4T0QoAKACgCrNdGKQqFzj3oSJbI/tx/uD86fKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMWLebzkJK4wcUNWGncmpDCgAoAKACgAoAKACgAoAi/5aj6GkLqS0xhQBmajollqZDTIRIBgSIcH/69B0UMXVoaQenYgsPDWn2EomVXllH3WlOcfQVTkzSvmFetHlbsvI2qk4yNoo3OWUE0XFYT7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyHoioMKoA9qBjqACgAoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFABQBBcW8N3A8FxEksTjDI4yCKcZShJSi7NEThGcXGSujgtX+F9tOzS6Vdm2J58mUb0/A9R+te5h89nBWrRv5rc8WvksJO9J28mc7J8NfEKNhfskg/vCbH8xXorPMM1qmvkcDyXEJ6W+8u2Pwt1GVwb6+ggj7iIF2/XArGrn1NL93Ft+ehtSySbf7ySXpqd7oXhbTPD8Z+yRbpmGGnk5dvx7D2FeDisbWxL/AHj07dD28NgqOHXuLXv1NyuU6woAKACgAoAQgHsKADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAoGKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAYGseKrHR5fIbfNcAZMcf8P1PauzD4GrXXMtEeTjs3oYR8r1l2X6lfTPGun39wsEiPbyOcLvIKk+me1XXy6rSjzboxwme4fETUGnFvvt9509cB7hG88aNtZsGgVxv2mH++Pyoswug+0w/3x+VFmF0H2mH++Pyoswuh6SpJnY2cdaLBcfQMKACgAoAKACgAoAKACgAoAKACgDI8Uf8AIq6t/wBecv8A6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8iNpv0f/ANDavAxn8aRmdRXOAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBF/y1H0NIXUlpjEPAOKBM8Fu72Se7mlmYmV3Znz65r7ijSjGmlHZH5tX5qlSU5btkP2j3rXkMeQ9v0KeW50Kwnmz5jwIWz3OOtfEYmMYVpRjsmz9GwkpToQlLdpFuUrv5jVuOpYCsTpZHlf+eCf99CgQZX/nhH/30KADK/8APCP/AL6FADlk2Z2xIM+jigB3nt/cX/vsUDuHnt/cX/vsUBcPPb+4v/fYoC4ee39xf++xQFw89v7i/wDfYoC4ee39xf8AvsUBcPPb+4v/AH2KAuHnt/cX/vsUBcUTOekYP/AxQFxQ8hIzHgeu6gRLQUFAGR4o/wCRV1b/AK85f/QTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/wDobV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAeceKfh/cXN7JfaO0eZWLSW7nbhj1Kn39K97A5vGnBU63TZ/wCZ8/jsndSbqUuu6/yM/RfhxqE10r6s0cFspyyRvud/bI4AroxWdU+W1DV/gjDDZJPmvW0R6pHGsaKiAKqjAA7CvmW23dn0qSSshjwl2yCv4pmgdhv2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dv70f/fsUBYPs7f3o/8Av2KAsH2dv70f/fsUBYPs7f3o/wDv2KAsH2dv70f/AH7FAWD7O396P/v2KAsH2dvWP/v2KAsPSAAfMEJ9lxQFh4RVOQoB9hQMdQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgCL/lqPoaQupLTGFAEM88VtH5k0qRoP4nYAUJN6IcYSk7RV2Mtry2ugTbzxSgddjhsflTcWt0OVKdPSaa9SzSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/ACKurf8AXnL/AOgmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQBi6j4n0vTmMck/mSjrHENxH17CtYUZz2R24fL8RXV4qy7vQxW+IFuG+XT5iPUyAVssHLud
6yOpbWaLNr4702YhZ45rcnuw3D9KUsHUW2pz1corwV42Z0ltdQ3cImt5UljPRkORXNKLi7M82cJQfLJWZPSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAi/5aj6GkLqS0xgTgUAeO63q82q6hLNIxKBiIkzwq9q9qhQUI2PsMJRhh6SjHfr6lK1vp7C6S5tpDHKhyCO/sfUV0uhGceWSFiFGpFxnqj2TTrsX2nW90BgTRq+PTIr56pHkm4dj5KpDkm49h06KZMmfZx0zUkMi8tf8An7/X/wCvT+RPzDy1/wCfv9f/AK9HyD5h5a/8/f6//Xo+QfMlhaOLOZw2fU0ikS+fF/z0X86AuHnxf89F/OgLh58X/PRfzoC4efF/z0X86AuHnxf89F/OgLh58X/PRfzoC4efF/z0X86AuHnxf89F/OgLh58X/PRfzoC4CWNjgOpP1osFySgYUAZHij/kVdW/685f/QTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/APobV4GM/jSMzqK5wCgAoAKACgDz7xP4qkmkex0+QrCvyySqeXPcA+n867qGH+1I+jy7LIpKrWWvRf11OQJrtSPbbEzVpEuQ0mqSIbLumavd6Rcia1kwP40P3XHuKmpQjVVmcmJw9OvHlkj1XRtXg1mwW6g4P3XQ9Ub0rxatKVKXKz5evQlRnySNGszEKACgAoAoXE0izsquQB6U0iG9SL7RN/z0anZCuw+0S/32osguw+0S/wB9qLILsPtEv99qLILsPtEv99qLILsPtEv99qLILsPtEv8AfaiyC7LFpK7uwZiRjvSaKTLlIoKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAi/5aj6GkLqS0xgaAPHvEWi3GjX8gaNjbOxMUoHBHofQivocHWhVil1PoqGMVSC116lDTtOu9Xu1t7OJnYnlsfKg9Sa661WnQjzSZNbERgrtns1jaJY2MFqhysMaoD64FfKzk5zcn1PAnJyk5PqOmDb+N/TsgNSSyPD/wDTT/v2KZIYf/pp/wB+xQAYf/pp/wB+xQAYf/pp/wB+xSAMP/00/wC/YpgGH/6af9+xQAYf/pp/37FABh/+mn/fsUAGH/6af9+xQAYf/pp/37FABh/+mn/fsUAPSN2H3iv1QUhkiQkH5mDf8BAoHYeEUdAPyoGOoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAGB4u1I6doj+W2JZz5SHuM9T+VbYanzz16HdltBVq6vstTy3NeukfXNiZq0iGxpNUkQ5CZq0iGxpNUkQ5HReDNUax1xIGb9zdfu2Hbd/Cfz4/GuXH0eelzdUedmNJVKXN1R6rXhHgBQAUAFAEElrHI25s59jQKw37FF/tfnQFg+xRf7X50BYPsUX+1+dAWD7FF/tfnQFg+xRf7X50BYPsUX+1+dAWD7FF/tfnRcLEkUCRElc5PrQ3cEiWgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAEZljV9hkUMexYZosAn/AC2H0NAupLQMKAGsqspDAEHqCKNgEjjSNdqKqj0AxQ23uF7j6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/+gmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/APQ2rwMZ/GkZnUVzgFABQAUAcH8QpD5thH/Dh2/HgV6GAXxM93JVbnfocQTXpJHtOQ3NUkQ5CZq0iHIaTVJENiZq0iHIktpDFdwSLwVkUj8xSnG8GjKrrBo91FfKHzIUAFABQBWlu/KkKbM496EhNkf2/wD6Z/rT5Rcwfb/+mf60couYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7eP+ef60co+YPt4/55/rRyhzB9vH/PP9aOUOYtRyebGHxjNJlD6ACgCjq9y9ppk0sf3wAAfTJxmnFXZMnZHCMxdizEljySeTXQc51fhu7kubdklYsYjtDHrjFYzVmbQdzeqDQKAOc1HXHWZorZgqqcF8ZJPtXmV8TNy5YaI9LD4JSipTKlt4inhlHnt5sWfmyOR9KqjXqJ+9qjoqZfCUfc0Z1cbrIiuhyrDIPtXonitNOzI5ZJlfCR7hjrQrCdxnnXH/PGiyFdh51x/wA8aLILsPOuP+eNFkF2SwvI+d6bcdKBoloGFABQAUAFABQAUAFABQAUAFABQBkeKP8AkVdW/wCvOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/8A6G1eBjP40jM6iucAoAKACgDiPiHbMbWzugOEdkb8Rkfyr0Mvl7zietlNS05Q7nAZr1kj23ITOKtIlsQmqSIchufWrSIbEziqSIci5pFq19rFnbIMl5lz9Acn9BWeIkqdKUn2MK9Tlg2e318meAFABQAUAV5Z4Ufa4yfpQkxNoZ9pt/7n/jtVZiug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdpWYXQ5J4HYKE5P+zSsx3RP5af3V/KgYeWn91fyoAcBgUAFABQBDc28d1bvBIMo4waE7O4mr6HLv4XuxLhJoimeG
OQfyrX2iMvZs3dNsE06JYUO4nJZvU1nKVy4qxo0iwPSgDze7LwXEsUgw6MQQa89ULM+qo2nBSjsym8xJwOTXTCidKhY9I02J4NNtopPvrGob64rZK2h8jXkp1ZSjs2yWbdv4MmMfwkYpmLI8v6y/mtAgy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgAy/rL+a0AGX9ZfzWgBf3n/Tb81oAcqux5aVfrigB4jYEHzGPscUAS0FBQBkeKP+RV1b/rzl/wDQTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKOq6fHqmmz2cvCyLgH+6ex/A1dKo6c1NdDSjUdKamuh45e2k+n3clrcJtljOCPX3HtX0tOUakVKOzPpYVY1IqUdmVia1SByEzVpEOQhNUkQ2NzVpEtnoHgDQWjDavcLguu2AEdu7fj0FeFmuJUn7KPz/yPMxla/uI76vHOEKACgAoAryi33nzNu760K5LsMxaf7P5mnqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg9IbdxlVBHsaLsdkO+zQ/3B+dK7CyFW3iVgwQZFAWJaBhQAUAFABQAUAFAEX/LUfQ0hdSWmMKAMzUdEs9Tw06ESAY8xDg//AF6Dow+Mq0NIPTsyCw8NafYTCZVeWUfdaU5x9BVOTNa+YV60eV6LyNqpOIjaKNzllBNFxWE+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsg+zxf881ouwsh6IqDCqAPagY6gAoAKACgDI8Uf8irq3/XnL/6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8AIjab9H/9DavAxn8aRmdRXOAUAFABQAUAYeveG7TXIR5n7q4Qfu5lHI9j6iunD4qdB6arsb4fEzovTbseb6n4Z1bS2JltWliHSWEblP5cj8a92hjaNXZ2fmetDF06nUxm4ODwfQ12qxo5E1rY3l9IEtbaWZv9hCf16Up1aVNXm7GU6kY7s7bw/wCAWDrc6xtwORbKc5/3j/QV4+LzVSXJR+//ACOGti76QO/VQqhVAAHAA7V4rdzhHUAFABQAUAV5LRZHLFiCaBWG/Yk/vtRzC5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlD7En99qOYOUPsSf32o5g5Q+xJ/fajmDlJoYVhUgEnPrQ3caViSgYUAFABQAUAFABQAUAFAEX/LUfQ0hdSWmMKAMDWPFVjo8vkNvmuAMmOP+H6ntXZh8DVrrmWiPJx2b0MI+V6y7L9SvpnjXT7+4WCRHt5HOFLkFSfTParr5dVpR5t0Y4TPcPiJqDTi332+86euA9wjeeNG2s2DQK437TD/fH5UWYXQfaYf74/KizC6D7TD/AHx+VFmF0PSVJM7GzjrRYLj6BhQAUAFABQAUAFABQAUAFABQAUAZHij/AJFXVv8Arzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/AOhtXgYz+NIzOornAKACgAoAKACgAoAia3hkOXiRj6lQaalJdQuyRVCjAAA9AKQC0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8tR9DSF1JaYxDwDQJngt3eyT3c0szEyu7M+fXNfcUaUY00o7I/Nq/NUqSnLdsh+0e9a8hjyHt+hTy3OhWE83+seBCxPc4618RiYxhWnGOybP0bBzlOhCUt2kW5iN/MStx1JArE6WR5X/ngn/fQpkhlf8Angn/AH0KADK/88E/76FADlk2Z2xKM+jikMd9ob/nmP8AvsUDuH2hv+eY/wC+xQFw+0N/zzH/AH2KAuH2hv8AnmP++xQFw+0N/wA8x/32KAuH2hv+eY/77FAXD7Q3/PMf99igLh9ob/nmP++xQFwEznpED/wMUBccryFgDFgeu4UCJaCgoAyPFH/Iq6t/15y/+gmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQB//9k=", + }, + { + m_type: "image/jpeg", + m_content: + 
"/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAMfAXEDAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAmtbaa9uora3TfNK21FzjJqZSUVeWwGmulaZAM3uuwbv+ednE05/76+Vf1rL2s5fDH7wuGPDK8FtZk/2gsK/pk0/3++n4i1HJpel6gzRaZfXP2nazJBdQBfMwCSA6sRnAOMjmpdSpDWa09R6mZYWkmo39tZwlRJcSLGhY4GSeM1tOShHmA1fEfhS/8MNbi9kt3+0BinksT93Gc5A9RWNDExrX5VsCdyxpPgnU9Z0VtVtpbVYF3/LI5DHb16DFTUxcKdTkaFzHNqrOMqrH6DNdLaW4wAJOACT6CnsAFSpwwIPoRihNPYByRyOGKRuwX7xVSQPr6Urq9gO703wRp154BfXHnuRdCCWUKrDZlScDGPb1rz54uca6h0J5jga9H1KNnw5oy6r4jstOvBNDFcMckDa2ApPGR7VhXq8lNyjuJs3td8HWGm+M9J0iCa4Nve7d7OQWXLEHBx7Vz0sVOVGVR7oL3RV8d+GLLwzd2UdlJO6zxszeawOCCBxgD1q8JiJ1k+boCdzlEjklJEaO5HUKpOPyrrbS3GNp7gFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBseGONcVu6wXDD6iF6xxHwfNfmDG6Rfabaafex3tibiaWMCFsA7flIxk/d5Ktkc/LjvRVp1JSTg7ICx/aWieTpCf2Uxa3YG7PA80Y5Gc/Nk884x0qPZ1bytL0FqT6fPZzeLTd2MHk2sNvLIV2heVhbLbQSFy3bJxmpkpRo2m7u6/MZQ8K8eKtHH/T1F/OtcQv3UvQHsew+L/B48VtaE3ptvs2/pFv3bse4x0rxsPiXRvZXuRF2JtK0H/hHPCdxpwuPtG1Jn3lNv3gT0yaU6vtaqk0F7s574RAHQL7p/x8j/ANAWujML88fQctzjfAYB+IFkMf8ALSX/ANAau3F/wH8hvY6nxjoy658SdKsGJWOS2BlK8HYrMT+PGK5MNV9nh5S8xJ6Grrni/S/BUsOk2mmb8IGaOIhFRT07ck4rKjhqmITnJgk3qX5b2w1H4e313psQitpbSZhHtxtbB3AgdDnNZqMo11GQupzXw/0bTtO8OS+JdQjV3Ad0Zl3eWi8Egf3iQf0roxlaU6nsojbvoaWiePdM8Sa5b2c2nNBMGLWssjBvmwf++SRn1FZ1cHUpU3JMHGyKni3/AJKj4Z+if+htWmH/AN2mJbDPiLpzav4p8P6erbTcB0Lf3RuXJ/LNGDn7OlOQ47HSzw3Phuxt7Tw3oC3K/wAZMyxgfUnlmNcqaqycqkidzC8c+H4NS8MvrRsRZalAgkkTgkjPzKxHDeoNdGEruFXkvdFJ6nkNez6FBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBPZ3k9hdx3Vu4WWM5UkAjkYIIPUEEjFTOKnFxYHSvZ2cukWGt3lnDDbrHJ5kdsnli5l8whEGOnAJJHQD3rk5pKbpQd9vkIyhrNqvTw/pX4rKf8A2etfYy6zYDZ9dmktpYILKws0mXZIba32My5ztLEk44H1qlQjdNybGO8L/wDI16T/ANfcf/oVGJ/hS9BM7v4tXE8Emk+TNLHkS52OVz930rz8vipc10KJq+BpJJvh3M8jvI3+kfM7Env3NZ4pJYjTyFLcxPhNq1vCt3pUrqk0rLNECcb/AJcED34BrbMacnyzQ5G1pngjTvDXiJdYl1FvLMpS2hdQuHfgDP8AF1wOKwnip1afJYVzO8XaumhfEvSb+UHyUtQsuByEZmBP4dfwrTDUnUw8ore41saHiTwTbeL7qHV7DUkj8yNVZgnmI4HQjBGD2rOhi5UIuDQJ2NB7Cy0v4eX9jYTieGC1mRpAQdz4O7OO+c8VmpynXUpCW5z/AMP9SsdY8LTeGbyQJKFdFXOC8bc5X3BJ/SujGU5QqqrHYclqWdC+H1r4d1u3v73VFmKvttYynl7nIOM88nGeBU1sZOrBxSBy0IvFv/JUPDX0T/0NqrD/AO7TEthnxD1I6R4r8PagF3fZw7lfUblBH5E0YODqUpw7jWxv6gl/4ktLa/8ADPiAW0ZXDrsDK314yrDpisIONJtVY3Fscl45TV9H0aCC58TPdvcZSe3ZFXcvqoAzt7HNdWE9nUqO0LDR5vXqFhQIKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAHmWRoliMjmNSSqFjtBPUgUuVXcrasBlMAoAVWZGDKSGByCDgii1wHyzzT486aSTHTe5bH51MYxWysA5Lm4jj8uOeVEP8KyED8hTcYt3cQIgSp
[… base64-encoded JPEG image data …]
CgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD/2Q==", + }, + ], + tool_failed: false, }, { role: "assistant", @@ -113,31 +109,27 @@ export const CHAT_WITH_MULTI_MODAL: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_Z0bacXQ2J69R8l7SAavCp8IL", - content: [ - { - m_type: "text", - m_content: - "opened a new tab: tab_id `3` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `3` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`", - }, - ], - tool_failed: false, - }, + tool_call_id: "call_Z0bacXQ2J69R8l7SAavCp8IL", + content: [ + { + m_type: "text", + m_content: + "opened a new tab: tab_id `3` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `3` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`", + }, + ], + tool_failed: false, }, { role: "tool", - content: { - tool_call_id: "call_NmC0xtr0Boz6buWVVjpuiDHO", - content: [ - { - m_type: "text", - m_content: - "opened a new tab: tab_id `4` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `4` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`", - }, - ], - tool_failed: false, - }, + tool_call_id: "call_NmC0xtr0Boz6buWVVjpuiDHO", + content: [ + { + m_type: "text", + m_content: + "opened a new tab: tab_id `4` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `4` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`", + }, + ], + tool_failed: false, }, { role: "assistant", @@ -176,41 +168,37 @@ export const CHAT_WITH_MULTI_MODAL: ChatThread = { }, { role: "tool", - content: { - tool_call_id: "call_KSF9MxJi5wAUyE7jrVZ8keHq", - content: [ - 
{ - m_type: "text", - m_content: - "opened a new tab: tab_id `5` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `5` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `5` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`", - }, - { - m_type: "image/jpeg", - m_content: - "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAGYAyADAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs2VjcahMYrZAzKpdizBVVR1JY8AfWonUUFdgaP9jWEH/H7r9mrd0tUe4YfiAF/Ws/bTfwxfz0FcBbeG/unU9Tz/e+xJj8t+afNX/lX3/8AAHqRz6TbvaT3Onail2kCh5Y2haKRVJA3YOQRkjODxmhVZcyU1a4XK2laVda1qMdjZKjTyAlQ7bRwMnmrq1I0o80tgbsS61od94fvVtL9I1lZBINj7htJI6/gamjWjVjzRBO5dTwdrEmg/wBtLFD9i8ozZ80bto9qzeKpqp7PqF1sYGQO4rpAKACgAoA7i18C20/gU6+b2YT/AGd5hEFG35SePXtXBLFyVf2VtLk31scPXeUafh/TE1nXrPTpJWjSd9pdQCQME8Z+lZVqjp03NdAehva/4Mt9I8T6TpUV3K8d8VDO6jKZfbxiuajipTpSm1sJPQj8b+EbfwqbL7PdTTi4358xQNu3HTH1qsJiZVr8y2BO5yXeuwYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGvpnGga63cxwL+Blyf5CsJ/xYfP8g6irPov/AAjgiaCQ6n5uS4U9N3Zs4xtyMYznnNFqvtb390Nbl6S48LNrkbRW0i2AgKkOj7fMzwSobccLwcEZPOMVly4jk1eotStYmAReI5rZXW1+yskQc5YK0qBQffFaTv7ilvf9AL/w4/5Hmy/3Jf8A0A1nj/4DG9juvGPgW78TaxFewXsECpAIiroxOQSc8fWuDDYpUY8trkJ2Lt7pj6N8MbrTpZFke3sXQuoIB6+tRCftMQpd2G7MnwHa28nw+uXeCJm3T/MyAnp61ri5NYjR9hvc4/4aRRzeL4FlRXX7PIcMMjOBXZj21R0HLY2/EXh+LWfijDpyqIYGt0kmMahflAOce54Fc9Cs6eGcuok7I6DVfEXhnwdImjrpu/5QZI4YlIVT/eLdSaxp0a2I9+4JNl6+fT5PhzevpQVbF7KRolUYCg5JGO2DnjtWcFNYhKe90T1OW8A+G9Ni0STxHq0ccije0YkGVjRerY7nIP5V1YyvNz9lAqT6G1pPi7w54j1y2tlsnhuonLWkskarkgHIBB44zwetY1MNWpQbvp1E00UPG3/JRPDH++n/AKNFa4b/AHeoC2E+KVpJf3/h+zh/1k8kka59SUFLAyUYzk+lv1GjbGm2ng3TYY9L0GfU7l+HeNFLH1ZmPT2ArBzlXk3OVkTuZfijwzZ654al1iDTX07UYozK0boEZtvVWA4PGcGtcPiJU6ig3dDTszyGvZLCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKA
[… base64-encoded JPEG screenshot data …]
L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oHXv/AIDP/hRcOVh/ZOpf9A69/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WL/ZOpf9A29/8AAZ/8KLhyst6Zpt/DqUEktjdIitks8DqBx3JFbUH+8RdOL5kdYOleyd62CgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM94+Hoz4F03k9H7/wC21eBjP40jNo6bZ7n865xWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86Asc/44XHgnVuT/qD39xW2G/jRBI8B9a+hNUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/WvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/AFr6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/wD0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/AJEnVv8Ar3P8xW2G/jRA+f8A1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/wAiNpv0f/0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/ADFbYb+NED5/9a+hNEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFACE4GT0oAyrnXIISViBlYdxwPzrgq4+EXaOp108HOWr0M59fuyflEa/8AAc1yvH1XtY6o4Gn1uCeILpT86RuPpirjjavVJg8BTezaNKz1u2uWCPmKQ9A3Q/jXZSxUJ6PRnJVwdSmrrVGrXUcoUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/MVthv40QPn/1r6E0QUAFABQ
AUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAATigDl9V1RrmQwxNiEdx/Ef8K8bFYlzfJHb8z1cLhlFc0tzLLVyKJ3JDC1UojSGlqtRKsMLVoolJG7oesMJFtLhsqeI2PY+hrvw9V/DI8vG4RJe0h8zp67DywoAKACgCpdyOhUKxGc9KaIZW8+X/no3507IV2Hny/89G/OnZBdh58v/PRvzosguw8+X/no350WQXYefL/z0b86LILsPPl/56N+dFkF2J58v/PRvzosguySCaQzIC5IJ6VLRSZo0igoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAMzW7o21gQpw0h2D6d65cVPlp2XU6MJS56mvQ5MtXkqJ7qQwtVKJVhparUR2GFqtRKSGlqtRKsM34xg8+taKI+W532k3f27TYZj94jDfUcGu+DvG58xiaXsqrgXqoxCgAoAilgSXG7PHpQnYTRH9ji/wBr86d2FkH2OL/a/Oi7CyD7HF/tfnRdhZB9ji/2vzouwsg+xxf7X50XYWQfY4v9r86LsLIPscX+1+dK7CyHJaxowYZyPegLE9AwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAGPKkeN7qufU4oAcCCMjpQAMwUEkgAdSTQAiSJIMo6sPVTmgB1ADBNGX2B1Lf3QwzQA+gBjzRx43uq56bmAoAeDkZFACMwQZYgAdSTQAiSJIMoysPUHNADqACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAOb8TuQ9svbDH+VcOM1aR6mWrST9Dni1caieqkMLVaiUkM3VaiOw0tVqJVhharUSkhC1WojSOv8IyFtOmU9Fl4/ECuiCsjwc1jasn5HRVZ5gUAFAEE9x5O35c596aVxN2Ift//AEz/AFo5Rcwfb/8Apn+tHKLmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt4/55/rRyj5g+3j/AJ5/rRyhzB9vH/PP9aOUOYngn84MduMe9DVhp3JqQwoAbI+yNmxnAJxQB55c3Ml5O00zFmY9+3sK6ErHM3c2vDF5KLl7UsWiKFgP7pFZ1Fpcum9bEXiS7lkvzbbiIowPl7EkZzTprS4TetjNsbuSyukliJHIyo/iHpVtXRKdmdV4iu5bXT1WIlWlbaWHUDGaxgrs1m7I44EqwYHDDnI61uYHaaZfSS6ILiT5pEVsn+9trCS96xvF+7c42eeS6laaZi7tySf6VslYxbudB4Xu5WlltWYmMLvXP8PP/wBes6i6mlN9Cn4iu5JtReAkiKLAC9icZzVQWlxTetinpl3LZ30TxEgMwDL2YE05K6FF2Z39YG4UAFABQAUAFABQAUAFAHPeOf8AkSdW/wCvc/zFbYb+NED5/wDWvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/ACI2m/R//Q2rwMZ/GkZnUVzgFABQAUAc54qiPk28w6KxU/j/APqrmxMbpM9LLJe/KJy5auVRPbsMLVaiOw0tVqJVhharUSrDS1WojsMLVoolJHc+E4TFo3mH/lrIzD6dP6VaVj5rNJ82IsuiN+g88KACgCGaWOPG8Zz04zQkJsi+02/9z/x2nZiug+02/wDc/wDHadmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7Sswug+02/9z/x2lZjuiwEQj7q/lQMXy0/ur+VAChQvQAfSgBaACgAIzQBy154YlM7NaSJ5bHIVzjbWiqdzJ0+xp6Pow04NJI4eZxgkDhR6CplK5UY2I9Z0X7e4mhdUmAwd3RhRGdtAlG5S0/w40dwst3IhVDkIhzk+59KqVTTQmMO5t6hZRahaNA7Y5yrD+E+tRF2dzRq6sc4vhi6MuGmhCZ+8Mk/lWntEZcjOmtraG1tEt0x5ajHPf1zWTd3c0SSVjnbrwzL5xNrLGYieA5wV9vetVU7kOHY1tI0pNNRmZw8z/eYdAPQVEpcxUY2INY0T7dKLiCRUlxhg3Rv/r04ztowlG+qK2m+HmguVnupEIQ5VF5yfc05TurImMLO7Ok3D1rM1DcPWgA3D1oANw9aADcPWgA3D1oANw9aADcPWgA3D1oA57xyR/whOrf9cD/MVthv40QPAO5r6EtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKmoWi31lLbtxuHB9D2NTKPMrGlGo6VRTXQ88njkt5nhlXbIhwRXNyWPqqcozipR2ZCWqlE0sN3VaiVYaWq1EqwwtVqI0iews5dRvY7aLqx5P8AdHc1drIyxFaNCm5yPTreBLa3jgjGERQoHsKg+OnJzk5Pdk1AgoAKAIZhCQPNx7ZoVxOxFi0/2fzNPUWgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqG
gYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg5I7ZzhQpPsTRdjsh/2aH+4Pzouwsg+zQ/3BSuwsibpQMKACgAoAKACgAoArXt5DZWstxPIscUSF3djgKoGSTTSuS3Y8M1/483H2x4tB06FrdThZ7vdl/cICMD6nNaKn3OaVV9DF/4Xt4p/59NL/wC/T/8AxdPkQvayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayKup/GXxHqumXFhPa6cIp02MUjcEDOePm9qqHuSUl0D2sjk/+EkvP+ecH5H/Guv65U7Ift5B/wkl5/wA84PyP+NP65U7IPbyFXxLdgjdFCR6YI/rR9cqdkP28ja03VYtQBABSVRkoT29R6110cQqmnU3pVubQ0K6DcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgDF1rQk1NPMjIjuVGAx6MPQ/40nG524PGvDuz1icPeWlzYymO5iaM9s9D9D3oUT6OjWp1VzQdysWqlE6EhharUSrFqw0y81OUJbREjvIeFH41Tstznr4qlh1eb+XU77RtFh0i3Kr88z/6yQ9/Ye1Zylc+XxeLniZXeiWyNapOUKACgAoAimgWbGSRj0pp2E1ci+xR/wB5qXMLlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUPsUf8AeajmDlD7FH/eajmDlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUkht1hYsCSSMc027jSsTUhhQAUAFABQAUAFABQAUAea/Gy7ltvh9cpExXz54onx3UnJH6Crp7mFV6HzLWxyBQB2Vv8ACvxjc28c6aTtSRQyh50VsHpkE5FTzI09myT/AIVL40/6Bcf/AIFR/wCNPmQ/ZyD/AIVL40/6Bcf/AIFR/wCNHMg9nIP+FS+NP+gXH/4FR/40cyD2cg/4VL40/wCgXH/4FR/40cyD2cg/4VL40/6Bcf8A4FR/40cyD2cg/wCFS+NP+gVH/wCBUf8AjRzIPZyMLX/CmteGJIU1eyNv54JjYOrq2OoyCeRkcUJpkyi47mNTICgAoAKACgAoAKACgC/p+h6rq0bvp2m3d2kZCu0ERcKfQkUm0tylFvZFz/hDvE3/AEL+pf8AgM3+FLmXcfI+xnX+m32lziDULOe1mK7gk0ZQkeuD2pp3E01uVaZJc0lzHqtsR3fafoeK0ou1RWNIO0kduOle0eitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv8AyI2m/R//AENq8DGfxpGZ1Fc4BQAUAFABQAUARSxRzIUkRXU9QwyKBqTi7xdmZknhrSJTk2ag/wCyxX+Rp8zOqOYYiKspixeHNJgYMtlGWH98lv50+ZhPH4mas5/oaiIqKFUBVHQAYFScjbbux9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeXfHP/kQm/6/If61dPc56ux82Vscoq/eH1oGfZEZHlp/uj+VYnYP4oAOKADigA4oAOKADigDyH48f8gzRP8ArvL/AOgrVRMquyPEa0OcKACgAoAKACgAoAKAO18DxeZaXZ+z+KpcSLzor4Qcfx/7X9KiRtD5/I6n7Of+fH4k/wDf2p+4r7zg/GaeXrUY8nW4v3K8aw2Zup6f7P8AXNWtjOW/+ZztUZlrTf8AkJ23/XQVdL44+qLh8SO5Fe2j0o7BQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/wChtXgYz+NIzOornAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA8u+Of/ACITf9fkP9aunuc9XY+bK2OUKAOpg+I/jC3gjgi165EcahVBCsQBwOSM0uVGntJdyT/hZ3jT/oP3H/fCf/E0cqDnl3D/AIWd40/6D9x/3wn/AMTRyoOeXcP+FneNP+g/cf8AfCf/ABNHKg55dw/4Wd40/wCg/cf98J/8TRyoOeXcP+FneNP+g/cf98J/8TRyoOeXcP8AhZ3jT/oP3H/fCf8AxNHKg55dzH1rxJrHiKSJ9W1Ca7MIIjD4AXPXAAAoSsS5N7mVTJCgAoAKACgAoAKACgCza6lfWSstpe3NurHLCKVkBPqcGlYpNrYsf2/rP/QX1D/wJf8Axosh80u5Uubu5vZBJdXE08gGA0shc49MmgTbe5DTJLWm/wDITtv+ugq6Xxx9UXD4kdyK9s9KOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAOf8AFPhew8W6d/ZmomYQGRZcwvtbK9OcH1pp2M3FS0Zxn/Ch/Cf/AD01P/wJH/xNV7RkexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0Y
exQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FFXU/gx4Z0jSrvUbeTUDPawvNHvnBXcoyMjb0rSjNupFeaGqSTuedivoDpQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgCnqGpWumWxuLqQIg4Hqx9AO5rSlRnVlywV2c+JxVLDw56rsjh9R8d3crFLGJYI+zONzn+gr2qWUxSvUd3+B8liuI6snaguVd3qzFfxHq7tk6hcZ9mxXasFQX2EeVLNcbJ3dRlq18YavbEZufOUdVlUHP4jmsqmW0J7K3odNDPMbSesuZeZ2GieLbTVWWCUfZ7o8BGOQ30P8AQ14+JwFSguZaxPp8vzqjinyS92Xbo/RnSVwntBQAUAFABQBG00aHDMAfSiwrjftEX98UWYXQfaIv+egoswug+0Rf89BRZhdB9oi/56CizC6D7RF/z0FFmF0H2iL/AJ6CizC6D7RF/fFFmF0PSVJCdjA49KBj6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBkeKP+RV1b/rzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P8A+htXgYz+NIzOornAKACgAoAgurmKztpLiZtscalmPoBThBzkox3ZnVqRpQc5PRHkOta1PrN81xKSsYyIo88IP8fWvrMLhY4eHKt+rPzvH42pi6rlLbouyM3dXXY4LBuosFg3UWCwocgggkEdCKVrjV07o9N8H+IDqto1rctm7gA+b++vr9fWvmcxwfsJ80fhf4M+6ybMXiafs6nxx/Fdzqa849wKACgAoAzboH7Q3B7VS2Ie5DhvQ0CDDehoAMN6GgAw3oaADDehoAMN6GgAw3oaALVkCJG47UmNF6kWFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8ALYfQ0hdSWmMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/8AoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/AMiNpv0f/wBDavAxn8aRmdRXOAUAFABQBxfxDv2t9JgtFOPtEhLf7q84/MivVyiipVnN9P1Pn+IK7hQjTX2n+CPNd1fTWPjLBuosFg3UWCwbqLBYN1Fgsavh3UDp+vWc4OFMgR/dW4P8/wBK48dRVWhJeX5HoZbWdDEwn52foz2mvkD9DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa7rGhdyAqjJJ7CgDEfxTaLLtWKVkz98Afyq/Zsz9ojTt7iK7CTQtuRgcGoatuUnctUFBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgDzj4mbhc6cT90pIB9civoMjtafyPluIk7036nBb69+x81YN1FhWDdRYLBuosFg30WHYlt2JuYQv3i6gfXIrKpZQdzSlFuordz34dK+FP0lC0DCgAoAqTJcGQlGO3thsUKxLuM8u7/vH/vqndCsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u6/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsy1EGEShzlu9JlIkoGFAGbrqSPpFwI8k4BIHpnmnHcmexw9dBzHUeFlkFvKzZ2M/y/lzWNTc2pnRVBqFAHOajrjrM0VswVVOC+Mkn2rzK2Jm5csNEelh8EpRUplS28RTwSjz282LPzZHI+lVRr1E/e1R0VMvhKPuaM6uN1kRXQ5VhkH2r0TxWmnZkcskyvhI9wx1oVhO4zzrj/njRZCuw864/540WQXYedcf88aLILslheR8+Ym3HSgaJaBhQAUAFABQAUAFABQAUAFABQAUAZHij/kVdW/685f8A0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo//AKG1eBjP40jM6iucAoAKACgDjviJppu9BW7jUl7R95x/cPB/ofwr1MnrKnX5X9r8zxs6w7q0Odbx/LqeTbq+usfG2E3UWCwbqLBYN1FgsG6iwWN7wfpx1PxLaptzFC3nSHsFXn9TgV5+ZVlRw8u70XzPSyvDutiYrotX8j22vjT7kKACgAoAqTSTrIQi/L2+XNCsS7jPOuv7p/75p6Cuw866/un/AL4p6Bdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2H
nXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXY6KS4aRQy/L3+XFJpDTZcpFBQAUAFABQAhGaAM19A06SXzDBgk5IDED8qfPInkRcjjWJkRFCoowABgCkJE9BYHpQB5vdl7e5lik4dGINeeqFmfU0EpwUo7MptNk4HJPAFdMKJ08lj0jTYnt9NtopPvpGob64rZK2h8lXmp1ZSjs2SSl9/HmYx/CRj9aZixmX/wCmv5rTJDL/APTX81oAMv8A9NfzWgAy/wD01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgAzJ/01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgA/e/9NfzWkMciyMeWlX64oAkEbAg+Yx9jigCWgoKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAEcsSTRNHIoZHBVlPQg9RQm07olpSVnseK+LPDE/h69LIrPYSN+6l67f9lvcfrX2WXY+OJhZ/Et1+p8bmGXyw87r4Xt/kc5ur07Hm2E3UWCwu6iwWJLeGa7uEgt42kmkO1EQZJNRUnGnFyk7JGkKUpyUYq7Z7R4Q8NL4f0w+bhryfDTMOg9FHsK+LzDGPFVNPhW3+Z9jl+CWGp6/E9/8jpa4T0QoAKACgCrNdGKQqFzj3oSJbI/tx/uD86fKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMH24/3B+dHKHMWLebzkJK4wcUNWGncmpDCgAoAKACgAoAKACgAoAi/5aj6GkLqS0xhQBmajollqZDTIRIBgSIcH/69B0UMXVoaQenYgsPDWn2EomVXllH3WlOcfQVTkzSvmFetHlbsvI2qk4yNoo3OWUE0XFYT7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyD7PF/zzWi7CyHoioMKoA9qBjqACgAoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFABQBBcW8N3A8FxEksTjDI4yCKcZShJSi7NEThGcXGSujgtX+F9tOzS6Vdm2J58mUb0/A9R+te5h89nBWrRv5rc8WvksJO9J28mc7J8NfEKNhfskg/vCbH8xXorPMM1qmvkcDyXEJ6W+8u2Pwt1GVwb6+ggj7iIF2/XArGrn1NL93Ft+ehtSySbf7ySXpqd7oXhbTPD8Z+yRbpmGGnk5dvx7D2FeDisbWxL/AHj07dD28NgqOHXuLXv1NyuU6woAKACgAoAQgHsKADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAbR6D8qADaPQflQAoGKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAYGseKrHR5fIbfNcAZMcf8P1PauzD4GrXXMtEeTjs3oYR8r1l2X6lfTPGun39wsEiPbyOcLvIKk+me1XXy6rSjzboxwme4fETUGnFvvt9509cB7hG88aNtZsGgVxv2mH++Pyoswug+0w/3x+VFmF0H2mH++Pyoswuh6SpJnY2cdaLBcfQMKACgAoAKACgAoAKACgAoAKACgDI8Uf8AIq6t/wBecv8A6Ca0ofxY+qA+dRX0ZaCgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/8iNpv0f/ANDavAxn8aRmdRXOAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBF/y1H0NIXUlpjEPAOKBM8Fu72Se7mlmYmV3Znz65r7ijSjGmlHZH5tX5qlSU5btkP2j3rXkMeQ9v0KeW50Kwnmz5jwIWz3OOtfEYmMYVpRjsmz9GwkpToQlLdpFuUrv5jVuOpYCsTpZHlf+eCf99CgQZX/nhH/30KADK/8APCP/AL6FADlk2Z2xIM+jigB3nt/cX/vsUDuHnt/cX/vsUBcPPb+4v/fYoC4ee39xf++xQFw89v7i/wDfYoC4ee39xf8AvsUBcPPb+4v/AH2KAuHnt/cX/vsUBcUTOekYP/AxQFxQ8hIzHgeu6gRLQUFAGR4o/wCRV1b/AK85f/QTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/wDobV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAeceKfh/cXN7JfaO0eZWLSW7nbhj1Kn39K97A5vGnBU63TZ/wCZ8/jsndSbqUuu6/yM/RfhxqE10r6s0cFspyyRvud/bI4AroxWdU+W1DV/gjDDZJPmvW0R6pHGsaKiAKqjAA7CvmW23dn0qSSshjwl2yCv4pmgdhv2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dvWP/v2KAsH2dv70f/fsUBYPs7f3o/8Av2KAsH2dv70f/fsUBYPs7f3o/wDv2KAsH2dv70f/AH7FAWD7O396P/v2KAsH2dvWP/v2KAsPSAAfMEJ9lxQFh4RVOQoB9hQMdQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAC
gAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgCL/lqPoaQupLTGFAEM88VtH5k0qRoP4nYAUJN6IcYSk7RV2Mtry2ugTbzxSgddjhsflTcWt0OVKdPSaa9SzSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/ACKurf8AXnL/AOgmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQBi6j4n0vTmMck/mSjrHENxH17CtYUZz2R24fL8RXV4qy7vQxW+IFuG+XT5iPUyAVssHLud6yOpbWaLNr4702YhZ45rcnuw3D9KUsHUW2pz1corwV42Z0ltdQ3cImt5UljPRkORXNKLi7M82cJQfLJWZPSJCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAi/5aj6GkLqS0xgTgUAeO63q82q6hLNIxKBiIkzwq9q9qhQUI2PsMJRhh6SjHfr6lK1vp7C6S5tpDHKhyCO/sfUV0uhGceWSFiFGpFxnqj2TTrsX2nW90BgTRq+PTIr56pHkm4dj5KpDkm49h06KZMmfZx0zUkMi8tf8An7/X/wCvT+RPzDy1/wCfv9f/AK9HyD5h5a/8/f6//Xo+QfMlhaOLOZw2fU0ikS+fF/z0X86AuHnxf89F/OgLh58X/PRfzoC4efF/z0X86AuHnxf89F/OgLh58X/PRfzoC4efF/z0X86AuHnxf89F/OgLh58X/PRfzoC4CWNjgOpP1osFySgYUAZHij/kVdW/685f/QTWlD+LH1QHzqK+jLQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/APobV4GM/jSMzqK5wCgAoAKACgDz7xP4qkmkex0+QrCvyySqeXPcA+n867qGH+1I+jy7LIpKrWWvRf11OQJrtSPbbEzVpEuQ0mqSIbLumavd6Rcia1kwP40P3XHuKmpQjVVmcmJw9OvHlkj1XRtXg1mwW6g4P3XQ9Ub0rxatKVKXKz5evQlRnySNGszEKACgAoAoXE0izsquQB6U0iG9SL7RN/z0anZCuw+0S/32osguw+0S/wB9qLILsPtEv99qLILsPtEv99qLILsPtEv99qLILsPtEv8AfaiyC7LFpK7uwZiRjvSaKTLlIoKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAi/5aj6GkLqS0xgaAPHvEWi3GjX8gaNjbOxMUoHBHofQivocHWhVil1PoqGMVSC116lDTtOu9Xu1t7OJnYnlsfKg9Sa661WnQjzSZNbERgrtns1jaJY2MFqhysMaoD64FfKzk5zcn1PAnJyk5PqOmDb+N/TsgNSSyPD/wDTT/v2KZIYf/pp/wB+xQAYf/pp/wB+xQAYf/pp/wB+xSAMP/00/wC/YpgGH/6af9+xQAYf/pp/37FABh/+mn/fsUAGH/6af9+xQAYf/pp/37FABh/+mn/fsUAPSN2H3iv1QUhkiQkH5mDf8BAoHYeEUdAPyoGOoAKAMjxR/wAirq3/AF5y/wDoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAGB4u1I6doj+W2JZz5SHuM9T+VbYanzz16HdltBVq6vstTy3NeukfXNiZq0iGxpNUkQ5CZq0iGxpNUkQ5HReDNUax1xIGb9zdfu2Hbd/Cfz4/GuXH0eelzdUedmNJVKXN1R6rXhHgBQAUAFAEElrHI25s59jQKw37FF/tfnQFg+xRf7X50BYPsUX+1+dAWD7FF/tfnQFg+xRf7X50BYPsUX+1+dAWD7FF/tfnRcLEkUCRElc5PrQ3cEiWgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAEZljV9hkUMexYZosAn/AC2H0NAupLQMKAGsqspDAEHqCKNgEjjSNdqKqj0AxQ23uF7j6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/+gmtKH8WPqgPnUV9GWgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/APQ2rwMZ/GkZnUVzgFABQAUAcH8QpD5thH/Dh2/HgV6GAXxM93JVbnfocQTXpJHtOQ3NUkQ5CZq0iHIaTVJENiZq0iHIktpDFdwSLwVkUj8xSnG8GjKrrBo91FfKHzIUAFABQBWlu/KkKbM496EhNkf2/wD6Z/rT5Rcwfb/+mf60couYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7f/wBM/wBaOUOYPt//AEz/AFo5Q5g+3/8ATP8AWjlDmD7eP+ef60co+YPt4/55/rRyhzB9vH/PP9aOUOYtRyebGHxjNJlD6ACgCjq9y9ppk0sf3wAAfTJxmnFXZMnZHCMxdizEljySeTXQc51fhu7kubdklYsYjtDHrjFYzVmbQdzeqDQKAOc1HXHWZorZgqqcF8ZJPtXmV8TNy5YaI9LD4JSipTKlt4inhlHnt5sWfmyOR9KqjXqJ+9qjoqZfCUfc0Z1cbrIiuhyrDIPtXonitNOzI5ZJlfCR7hjrQrCdxnnXH/PGiyFdh51x/wA8aLILsPOuP+eNFkF2SwvI+d6bcdKBoloGFABQAUAFABQAUAFABQAUAFABQBkeKP8AkVdW/wCvOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/8A6G1eBjP40jM6iucAoAKACgDiPiHbMbWzugOEdkb8Rkfyr0Mvl7zietlNS05Q7nAZr1kj23ITOKtIlsQmqSIchufWrSIbEziqSIci5pFq19rFnbIMl5lz9Acn9BWeIkqdKUn2MK9Tlg2e318meAFABQAUAV5Z4Ufa4yfpQkxNoZ9pt/7n/jtVZiug+02/9z/x2izC6D7Tb/3P/HaLMLoPtNv/AHP/AB2izC6D7Tb/ANz/AMdoswug+02/9z/x2izC6
[... remainder of base64-encoded JPEG screenshot data omitted ...]",
-        },
-      ],
-      tool_failed: false,
-    },
+    tool_call_id: "call_KSF9MxJi5wAUyE7jrVZ8keHq",
+    content: [
+      {
+        m_type: "text",
+        m_content:
+          "opened a new tab: tab_id `5` device `desktop` uri `about:blank`\n\nnavigate_to successful: tab_id `5` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `5` device `desktop` uri `file:///Users/kot/code_aprojects/huddle/index.html`",
+      },
+      {
+        m_type: "image/jpeg",
+        m_content:
+ "/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAGYAyADAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs2VjcahMYrZAzKpdizBVVR1JY8AfWonUUFdgaP9jWEH/H7r9mrd0tUe4YfiAF/Ws/bTfwxfz0FcBbeG/unU9Tz/e+xJj8t+afNX/lX3/8AAHqRz6TbvaT3Onail2kCh5Y2haKRVJA3YOQRkjODxmhVZcyU1a4XK2laVda1qMdjZKjTyAlQ7bRwMnmrq1I0o80tgbsS61od94fvVtL9I1lZBINj7htJI6/gamjWjVjzRBO5dTwdrEmg/wBtLFD9i8ozZ80bto9qzeKpqp7PqF1sYGQO4rpAKACgAoA7i18C20/gU6+b2YT/AGd5hEFG35SePXtXBLFyVf2VtLk31scPXeUafh/TE1nXrPTpJWjSd9pdQCQME8Z+lZVqjp03NdAehva/4Mt9I8T6TpUV3K8d8VDO6jKZfbxiuajipTpSm1sJPQj8b+EbfwqbL7PdTTi4358xQNu3HTH1qsJiZVr8y2BO5yXeuwYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGvpnGga63cxwL+Blyf5CsJ/xYfP8g6irPov/AAjgiaCQ6n5uS4U9N3Zs4xtyMYznnNFqvtb390Nbl6S48LNrkbRW0i2AgKkOj7fMzwSobccLwcEZPOMVly4jk1eotStYmAReI5rZXW1+yskQc5YK0qBQffFaTv7ilvf9AL/w4/5Hmy/3Jf8A0A1nj/4DG9juvGPgW78TaxFewXsECpAIiroxOQSc8fWuDDYpUY8trkJ2Lt7pj6N8MbrTpZFke3sXQuoIB6+tRCftMQpd2G7MnwHa28nw+uXeCJm3T/MyAnp61ri5NYjR9hvc4/4aRRzeL4FlRXX7PIcMMjOBXZj21R0HLY2/EXh+LWfijDpyqIYGt0kmMahflAOce54Fc9Cs6eGcuok7I6DVfEXhnwdImjrpu/5QZI4YlIVT/eLdSaxp0a2I9+4JNl6+fT5PhzevpQVbF7KRolUYCg5JGO2DnjtWcFNYhKe90T1OW8A+G9Ni0STxHq0ccije0YkGVjRerY7nIP5V1YyvNz9lAqT6G1pPi7w54j1y2tlsnhuonLWkskarkgHIBB44zwetY1MNWpQbvp1E00UPG3/JRPDH++n/AKNFa4b/AHeoC2E+KVpJf3/h+zh/1k8kka59SUFLAyUYzk+lv1GjbGm2ng3TYY9L0GfU7l+HeNFLH1ZmPT2ArBzlXk3OVkTuZfijwzZ654al1iDTX07UYozK0boEZtvVWA4PGcGtcPiJU6ig3dDTszyGvZLCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKALthqTWC3CGCG4gnQLLFLnDYOQcgggg+9Z1KfPZ3s0BualpOm6bNLfXNu4tXSMW1okpBkkMas53HJCLu+pJA9a54Vak1yJ663fzFczhqOjKP+RfDH/avpD/SteSr/AD/gGpHd6uk1k9nZ6db2MMjq8vls7tIV+6CWJ4GegpxotS55O7HY2Phv/wAjxZf7kv8A6Aayx38FilsbvxK1nU9O8SQQ2WoXNvGbVWKRSFQTubniufA0YTptyV9RRWh0EdxNd/CJ57iV5Zn09yzucsx56mublUcVZdxdSn8MLq3vPDN3pZfE
0cjllzzscdR+oq8fFxqqQ5bknhTwI3hjXDf3WoRSLtMNuqgqWLeue+B0FLEYv20OVL1E3cqatq0Gj/FyGe5YJBJaJC7nou7OCfbIFXTpueEaW9xpXRN4u+H91r+t/wBp2F3AgmVRIsueCBjIIBzxjilhsYqUOSS2BSsbF1pUeifDe906KXzRBZygv/ebkt9OSeKxjUdTEKb6tCvdmP4FuLTX/A8/h+SXZNGjxMB97YxJDAd8E/pW2LjKlXVRbDejuQeGvhxc6Rr0GoX99btFbvuiWLOXboM5HH05p18cqlNxitwcrj/G3/JRPDH++n/o0U8N/u9QS2JPiTfHTNZ8N323d9nmkkK+oBTI/KpwMOeFSPf/AIII39Vn1nVNOtb7wrf2hjcEsJkyHB6YPYjuDXPTVOEnGsmCt1Oa8UT+LtJ8Mm4vdVsH84mGaKOAAhWGPlJ6n144rpw6oVKtoxeg1a55T0r1ygoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAs3V9dXxiN1cSTeUgjj3nO1R0AqYU4w2QFaqAKAJ7S7ubC5W4tJ5IJlztkjbBGevNTKKkrSV0A+91C81KYTXtzLcShdoeVtxA9P1ohCMFaKsBMuuaqun/YF1C5Fnt2eQJDs2+mPSo9jT5ua2oWK1reXNhcLcWk8kEy/deNsEVcoxkrSVwLlz4h1m8nhmuNTupJIG3RMX+4fUY6H3rONClFNKO4WRUvL261C4NxeXEk8xABeRsnA6CtIwjBWirAXLXxJrVja/ZbXVLqKDGAiycAe3p+FRKhTk7uKuFkQprWpx2L2Sahci1fO6ESHac8nI96HRpuXNbULFa3uJrSdZ7eaSGVDlXjYqw/EVpKKkrNAX7rxHrV60DXOp3UjQMHiJfG1h3GO/vWUcPSje0dwsiC51fUby6iurm+uJbiHHlyO5LJg5GD25q40oRTilowsJf6rqGqFDf3s9yY87PNfdtz1xRClCHwqwWHafrGpaUW+wX09tu+8I3wD9R0pTown8SuFhl/ql/qkolv7ya5deAZWzj6DoKcKUYK0VYLFSrAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKAuFAXCgLhQFwoC4UBcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAKmoahFp8IeTJY8Kg6k1jWrKmrsyqVVFHPP4jvWfKCJF/u7c1wPF1HscjrSY3/hIr/+9F/37pfWqvcPbSD/AISK/wD70X/fuj61V7h7aQf8JFf/AN6L/v3R9aq9w9tIP+Ehv/70X/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJDf8ArF/37o+tVe4e2kH/AAkN/wCsX/fuj61V7h7aQf8ACQ3/AKxf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrV
XuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSD/hIr/8AvRf9+6PrVXuHtpB/wkV//ei/790fWqvcPbSD/hIr/wDvRf8Afuj61V7h7aQf8JFf/wB6L/v3R9aq9w9tIP8AhIr/APvRf9+6PrVXuHtpB/wkV/8A3ov+/dH1qr3D20g/4SK//vRf9+6PrVXuHtpB/wAJFf8A96L/AL90fWqvcPbSHJ4ivVcFhEw9NmKaxdRAq0kb+nalFqERZMq6/eQ9v/rV3Ua6qLzOqlVUi7W5sFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZyHiCVn1V0PSNVUD8M/1rycVJuozz6zvMy65zEKACgAoA1dC8Nax4lnkh0ixe6eJd0hBCqgPTJJAGaTdilFvY3v+FUeNf8AoDf+TMX/AMVRzIr2cuwf8Ko8a/8AQG/8mYv/AIqjmQezl2D/AIVR41/6A3/kzF/8VRzIPZy7B/wqjxr/ANAb/wAmYv8A4qjmQezl2D/hVHjX/oDf+TMX/wAVRzIPZy7B/wAKo8a/9Ab/AMmYv/iqOZB7OXYP+FUeNf8AoDf+TMX/AMVRzIPZy7B/wqjxr/0Bv/JmL/4qjmQezl2D/hVHjX/oDf8AkzF/8VRzIPZy7B/wqjxr/wBAb/yZi/8AiqOZB7OXYP8AhVHjX/oDf+TMX/xVHMg9nLsH/CqPGv8A0Bv/ACZi/wDiqOZB7OXYP+FUeNf+gN/5Mxf/ABVHMg9nLsVr/wCG3i7TbGa8utHkEEKl5GSVHKqOpwrE4pcyB05LocrVGYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADo43lkWONGeRyAqqMkn0A70AaX/AAjeu/8AQF1H/wABX/wpXRfJLsH/AAjeu/8AQF1H/wABX/woug5JdjNkikhlaKVGSRDtZWGCD6EdqZA2gAoAKANHQ5THq0QB4fKn8q2w8mqiNaTtJHZDpXsHoLYKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAAaAZxuu/8hib/gP/AKCK8fEfxWedV+NmdWJkFABQAUAe3fAb/kGa36+fF/6C1ZyOilsevVJqFABQAUAFABQAUAFABQAUAFABQAUAVdR/5Bd5/wBe8n/oBoB7Hx4Puj6Vscb3FoEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAFrTJPJ1S0kN41kFmU/alUkw8/fAHXHWk9io7np/9v2//AEVy9/8AANqzOn5h/b9v/wBFcvf/AADagPmeZatKJtXvJRfNfh5mP2t1Kmbn75B6ZrRbHNLcp0yQoAKALukf8he2/wB/+hrWh/ERpD4kdsOleyeitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUABoBnG67/yGJv+A/8AoIrx8R/FZ51X42Z1YmQUAFABQB2PgTx/ceCXvFWyS8t7raWjMmwqy5wQcHsemKlq5pCfKdr/AML6/wCpc/8AJ3/7ClyGntvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIP+F9f9S5/wCTv/2FHIHtvIP+F9f9S5/5O/8A2FHIHtvIqap8cri80y5tbXQ0t5po2jEr3O8JkYJxtGTzRyCdW62PJOgx6VZgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBNZ3T2V5BdRrG7wyCRVkQMpIOeQeo9qRSdnc7H/haOsf9A3Qv/Bev+NTyIv2j7B/wtHWP+gboX/gvX/GjkQe0fY4++u3v76e7lSJJJ5DIyxIEQE+gHQVSIbu7kFMkKACgC7pH/IXtv9/+hrWh/ERpD4kdsK9k9GOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAA0CZy2sadfT6nLJDZXUkbbcOkDMDwOhArx8R/FZwVIvmZR/snUv+gbe/wDgM/8AhWFyOVif2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlY
f2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oG3v/AIDP/hRcOVh/ZOpf9A29/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WH9k6l/wBA29/8Bn/wouHKw/snUv8AoG3v/gM/+FFw5WH9k6l/0Db3/wABn/wouHKw/snUv+gbe/8AgM/+FFw5WH9k6l/0Db3/AMBn/wAKLhysP7J1L/oG3v8A4DP/AIUXDlYf2TqX/QNvf/AZ/wDCi4crD+ydS/6Bt7/4DP8A4UXDlYf2TqX/AEDb3/wGf/Ci4crD+ydS/wCgbe/+Az/4UXDlYf2TqX/QNvf/AAGf/Ci4crD+ydS/6Bt7/wCAz/4UXDlYf2TqX/QNvf8AwGf/AAouHKw/snUv+gbe/wDgM/8AhRcOVh/ZOpf9A29/8Bn/AMKLhysP7J1L/oG3v/gM/wDhRcOVh/ZOpf8AQNvf/AZ/8KLhysP7J1L/AKBt7/4DP/hRcOVh/ZOpf9A29/8AAZ/8KLhysP7J1L/oHXv/AIDP/hRcOVh/ZOpf9A69/wDAZ/8ACi4crD+ydS/6Bt7/AOAz/wCFFw5WH9k6l/0Db3/wGf8AwouHKw/snUv+gbe/+Az/AOFFw5WL/ZOpf9A29/8AAZ/8KLhyst6Zpt/DqUEktjdIitks8DqBx3JFbUH+8RdOL5kdYOleyd62CgYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM94+Hoz4F03k9H7/wC21eBjP40jNo6bZ7n865xWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86AsGz3P50BYNnufzoCwbPc/nQFg2e5/OgLBs9z+dAWDZ7n86Asc/44XHgnVuT/qD39xW2G/jRBI8B9a+hNUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/WvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/Ijab9H/wDQ2rwMZ/GkZnUVzgFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHPeOf+RJ1b/r3P8xW2G/jRA+f/AFr6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/wD0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/AJEnVv8Ar3P8xW2G/jRA+f8A1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/wAiNpv0f/0Nq8DGfxpGZ1Fc4BQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/ADFbYb+NED5/9a+hNEFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAB6UAz3n4e/wDIjab9H/8AQ2rwMZ/GkZnUVzgFABQAUAFACE4GT0oAyrnXIISViBlYdxwPzrgq4+EXaOp108HOWr0M59fuyflEa/8AAc1yvH1XtY6o4Gn1uCeILpT86RuPpirjjavVJg8BTezaNKz1u2uWCPmKQ9A3Q/jXZSxUJ6PRnJVwdSmrrVGrXUcoUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBz3jn/kSdW/69z/MVthv40QPn/1r6E0QUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/yI2m/R/8A0Nq8DGfxpGZ1Fc4BQAUAFAATigDl9V1RrmQwxNiEdx/Ef8K8bFYlzfJHb8z1cLhlFc0tzLLVyKJ3JDC1UojSGlqtRKsMLVoolJG7oesMJFtLhsqeI2PY+hrvw9V/DI8vG4RJe0h8zp67DywoAKACgCpdyOhUKxGc9KaIZW8+X/no3507IV2Hny/89G/OnZBdh58v/PRvzosguw8+X/no350WQXYefL/z0b86LILsPPl/56N+dFkF2J58v/PRvzosguySCaQzIC5IJ6VLRSZo0igoAKACgAoAK
ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAMzW7o21gQpw0h2D6d65cVPlp2XU6MJS56mvQ5MtXkqJ7qQwtVKJVhparUR2GFqtRKSGlqtRKsM34xg8+taKI+W532k3f27TYZj94jDfUcGu+DvG58xiaXsqrgXqoxCgAoAilgSXG7PHpQnYTRH9ji/wBr86d2FkH2OL/a/Oi7CyD7HF/tfnRdhZB9ji/2vzouwsg+xxf7X50XYWQfY4v9r86LsLIPscX+1+dK7CyHJaxowYZyPegLE9AwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAGPKkeN7qufU4oAcCCMjpQAMwUEkgAdSTQAiSJIMo6sPVTmgB1ADBNGX2B1Lf3QwzQA+gBjzRx43uq56bmAoAeDkZFACMwQZYgAdSTQAiSJIMoysPUHNADqACgAoAKACgAoAKACgAoA57xz/AMiTq3/Xuf5itsN/GiB8/wDrX0JogoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKAOb8TuQ9svbDH+VcOM1aR6mWrST9Dni1caieqkMLVaiUkM3VaiOw0tVqJVhharUSkhC1WojSOv8IyFtOmU9Fl4/ECuiCsjwc1jasn5HRVZ5gUAFAEE9x5O35c596aVxN2Ift//AEz/AFo5Rcwfb/8Apn+tHKLmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt/8A0z/WjlDmD7f/ANM/1o5Q5g+3/wDTP9aOUOYPt4/55/rRyj5g+3j/AJ5/rRyhzB9vH/PP9aOUOYngn84MduMe9DVhp3JqQwoAbI+yNmxnAJxQB55c3Ml5O00zFmY9+3sK6ErHM3c2vDF5KLl7UsWiKFgP7pFZ1Fpcum9bEXiS7lkvzbbiIowPl7EkZzTprS4TetjNsbuSyukliJHIyo/iHpVtXRKdmdV4iu5bXT1WIlWlbaWHUDGaxgrs1m7I44EqwYHDDnI61uYHaaZfSS6ILiT5pEVsn+9trCS96xvF+7c42eeS6laaZi7tySf6VslYxbudB4Xu5WlltWYmMLvXP8PP/wBes6i6mlN9Cn4iu5JtReAkiKLAC9icZzVQWlxTetinpl3LZ30TxEgMwDL2YE05K6FF2Z39YG4UAFABQAUAFABQAUAFAHPeOf8AkSdW/wCvc/zFbYb+NED5/wDWvoTRBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv/ACI2m/R//Q2rwMZ/GkZnUVzgFABQAUAc54qiPk28w6KxU/j/APqrmxMbpM9LLJe/KJy5auVRPbsMLVaiOw0tVqJVhharUSrDS1WojsMLVoolJHc+E4TFo3mH/lrIzD6dP6VaVj5rNJ82IsuiN+g88KACgCGaWOPG8Zz04zQkJsi+02/9z/x2nZiug+02/wDc/wDHadmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/wC5/wCO0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/uf+O0WYXQfabf8Auf8AjtFmF0H2m3/uf+O0WYXQfabf+5/47RZhdB9pt/7n/jtFmF0H2m3/ALn/AI7Sswug+02/9z/x2lZjuiwEQj7q/lQMXy0/ur+VAChQvQAfSgBaACgAIzQBy154YlM7NaSJ5bHIVzjbWiqdzJ0+xp6Pow04NJI4eZxgkDhR6CplK5UY2I9Z0X7e4mhdUmAwd3RhRGdtAlG5S0/w40dwst3IhVDkIhzk+59KqVTTQmMO5t6hZRahaNA7Y5yrD+E+tRF2dzRq6sc4vhi6MuGmhCZ+8Mk/lWntEZcjOmtraG1tEt0x5ajHPf1zWTd3c0SSVjnbrwzL5xNrLGYieA5wV9vetVU7kOHY1tI0pNNRmZw8z/eYdAPQVEpcxUY2INY0T7dKLiCRUlxhg3Rv/r04ztowlG+qK2m+HmguVnupEIQ5VF5yfc05TurImMLO7Ok3D1rM1DcPWgA3D1oANw9aADcPWgA3D1oANw9aADcPWgA3D1oA57xyR/whOrf9cD/MVthv40QPAO5r6EtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKAKmoWi31lLbtxuHB9D2NTKPMrGlGo6VRTXQ88njkt5nhlXbIhwRXNyWPqqcozipR2ZCWqlE0sN3VaiVYaWq1EqwwtVqI0iews5dRvY7aLqx5P8AdHc1drIyxFaNCm5yPTreBLa3jgjGERQoHsKg+OnJzk5Pdk1AgoAKAIZhCQPNx7ZoVxOxFi0/2fzNPUWgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP9n8zRqGgYtP8AZ/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/Z/M0ahoGLT/AGfzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/2fzNGoaBi0/wBn8zRqGg5I7ZzhQpPsTRdjsh/2aH+4Pzouwsg+zQ/3BSuwsibpQMKACgAoAKACgAoArXt5DZWstxPIscUSF3djgKoGSTTSuS3Y8M1/483H2x4tB06FrdThZ7vdl/cICMD6nNaKn3OaVV9DF/4Xt4p/59NL/wC/T/8AxdPkQvayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9N
L/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/hevin/AJ9NL/79P/8AF0ciD2sg/wCF6+Kf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayD/he3in/AJ9NL/79P/8AF0ciD2sg/wCF7eKf+fTS/wDv0/8A8XRyIPayKup/GXxHqumXFhPa6cIp02MUjcEDOePm9qqHuSUl0D2sjk/+EkvP+ecH5H/Guv65U7Ift5B/wkl5/wA84PyP+NP65U7IPbyFXxLdgjdFCR6YI/rR9cqdkP28ja03VYtQBABSVRkoT29R6110cQqmnU3pVubQ0K6DcKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgDF1rQk1NPMjIjuVGAx6MPQ/40nG524PGvDuz1icPeWlzYymO5iaM9s9D9D3oUT6OjWp1VzQdysWqlE6EhharUSrFqw0y81OUJbREjvIeFH41Tstznr4qlh1eb+XU77RtFh0i3Kr88z/6yQ9/Ye1Zylc+XxeLniZXeiWyNapOUKACgAoAimgWbGSRj0pp2E1ci+xR/wB5qXMLlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUPsUf8AeajmDlD7FH/eajmDlD7FH/eajmDlD7FH/eajmDlD7FH/AHmo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/3mo5g5Q+xR/wB5qOYOUPsUf95qOYOUPsUf95qOYOUPsUf95qOYOUkht1hYsCSSMc027jSsTUhhQAUAFABQAUAFABQAUAea/Gy7ltvh9cpExXz54onx3UnJH6Crp7mFV6HzLWxyBQB2Vv8ACvxjc28c6aTtSRQyh50VsHpkE5FTzI09myT/AIVL40/6Bcf/AIFR/wCNPmQ/ZyD/AIVL40/6Bcf/AIFR/wCNHMg9nIP+FS+NP+gXH/4FR/40cyD2cg/4VL40/wCgXH/4FR/40cyD2cg/4VL40/6Bcf8A4FR/40cyD2cg/wCFS+NP+gVH/wCBUf8AjRzIPZyMLX/CmteGJIU1eyNv54JjYOrq2OoyCeRkcUJpkyi47mNTICgAoAKACgAoAKACgC/p+h6rq0bvp2m3d2kZCu0ERcKfQkUm0tylFvZFz/hDvE3/AEL+pf8AgM3+FLmXcfI+xnX+m32lziDULOe1mK7gk0ZQkeuD2pp3E01uVaZJc0lzHqtsR3fafoeK0ou1RWNIO0kduOle0eitgoGFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAelAM95+Hv8AyI2m/R//AENq8DGfxpGZ1Fc4BQAUAFABQAUARSxRzIUkRXU9QwyKBqTi7xdmZknhrSJTk2ag/wCyxX+Rp8zOqOYYiKspixeHNJgYMtlGWH98lv50+ZhPH4mas5/oaiIqKFUBVHQAYFScjbbux9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeXfHP/kQm/6/If61dPc56ux82Vscoq/eH1oGfZEZHlp/uj+VYnYP4oAOKADigA4oAOKADigDyH48f8gzRP8ArvL/AOgrVRMquyPEa0OcKACgAoAKACgAoAKAO18DxeZaXZ+z+KpcSLzor4Qcfx/7X9KiRtD5/I6n7Of+fH4k/wDf2p+4r7zg/GaeXrUY8nW4v3K8aw2Zup6f7P8AXNWtjOW/+ZztUZlrTf8AkJ23/XQVdL44+qLh8SO5Fe2j0o7BQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P/wChtXgYz+NIzOornAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA8u+Of/ACITf9fkP9aunuc9XY+bK2OUKAOpg+I/jC3gjgi165EcahVBCsQBwOSM0uVGntJdyT/hZ3jT/oP3H/fCf/E0cqDnl3D/AIWd40/6D9x/3wn/AMTRyoOeXcP+FneNP+g/cf8AfCf/ABNHKg55dw/4Wd40/wCg/cf98J/8TRyoOeXcP+FneNP+g/cf98J/8TRyoOeXcP8AhZ3jT/oP3H/fCf8AxNHKg55dzH1rxJrHiKSJ9W1Ca7MIIjD4AXPXAAAoSsS5N7mVTJCgAoAKACgAoAKACgCza6lfWSstpe3NurHLCKVkBPqcGlYpNrYsf2/rP/QX1D/wJf8Axosh80u5Uubu5vZBJdXE08gGA0shc49MmgTbe5DTJLWm/wDITtv+ugq6Xxx9UXD4kdyK9s9KOwUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/AJEbTfo//obV4GM/jSMzqK5wCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAOf8AFPhew8W6d/ZmomYQGRZcwvtbK9OcH1pp2M3FS0Zxn/Ch/Cf/AD01P/wJH/xNV7RkexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/PTU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FB/wAKH8J/89NT/wDAkf8AxNHtGHsUH/Ch/Cf/AD01P/wJH/xNHtGHsUH/AAofwn/z01P/AMCR/wDE0e0YexQf8KH8J/8APTU//Akf/E0e0YexQf8ACh/Cf/P
TU/8AwJH/AMTR7Rh7FB/wofwn/wA9NT/8CR/8TR7Rh7FFXU/gx4Z0jSrvUbeTUDPawvNHvnBXcoyMjb0rSjNupFeaGqSTuedivoDpQUDCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAPSgGe8/D3/kRtN+j/8AobV4GM/jSMzqK5wCgAoAKACgCnqGpWumWxuLqQIg4Hqx9AO5rSlRnVlywV2c+JxVLDw56rsjh9R8d3crFLGJYI+zONzn+gr2qWUxSvUd3+B8liuI6snaguVd3qzFfxHq7tk6hcZ9mxXasFQX2EeVLNcbJ3dRlq18YavbEZufOUdVlUHP4jmsqmW0J7K3odNDPMbSesuZeZ2GieLbTVWWCUfZ7o8BGOQ30P8AQ14+JwFSguZaxPp8vzqjinyS92Xbo/RnSVwntBQAUAFABQBG00aHDMAfSiwrjftEX98UWYXQfaIv+egoswug+0Rf89BRZhdB9oi/56CizC6D7RF/z0FFmF0H2iL/AJ6CizC6D7RF/fFFmF0PSVJCdjA49KBj6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAIv+Wo+hpC6ktMYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBkeKP+RV1b/rzl/9BNaUP4sfVAfOor6MtBQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgA9KAZ7z8Pf+RG036P8A+htXgYz+NIzOornAKACgAoAgurmKztpLiZtscalmPoBThBzkox3ZnVqRpQc5PRHkOta1PrN81xKSsYyIo88IP8fWvrMLhY4eHKt+rPzvH42pi6rlLbouyM3dXXY4LBuosFg3UWCwocgggkEdCKVrjV07o9N8H+IDqto1rctm7gA+b++vr9fWvmcxwfsJ80fhf4M+6ybMXiafs6nxx/Fdzqa849wKACgAoAzboH7Q3B7VS2Ie5DhvQ0CDDehoAMN6GgAw3oaADDehoAMN6GgAw3oaALVkCJG47UmNF6kWFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUARf8ALYfQ0hdSWmMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAyPFH/Iq6t/15y/8AoJrSh/Fj6oD51FfRloKBhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAHpQDPefh7/AMiNpv0f/wBDavAxn8aRmdRXOAUAFABQBxfxDv2t9JgtFOPtEhLf7q84/MivVyiipVnN9P1Pn+IK7hQjTX2n+CPNd1fTWPjLBuosFg3UWCwbqLBYN1Fgsavh3UDp+vWc4OFMgR/dW4P8/wBK48dRVWhJeX5HoZbWdDEwn52foz2mvkD9DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa7rGhdyAqjJJ7CgDEfxTaLLtWKVkz98Afyq/Zsz9ojTt7iK7CTQtuRgcGoatuUnctUFBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAGR4o/5FXVv+vOX/0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo/wD6G1eBjP40jM6iucAoAKACgDzj4mbhc6cT90pIB9civoMjtafyPluIk7036nBb69+x81YN1FhWDdRYLBuosFg30WHYlt2JuYQv3i6gfXIrKpZQdzSlFuordz34dK+FP0lC0DCgAoAqTJcGQlGO3thsUKxLuM8u7/vH/vqndCsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u7/ALx/76ougsw8u7/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/wC+qLoLMPLu/wC8f++qLoLMPLu/7x/76ougsw8u7/vH/vqi6CzDy7v+8f8Avqi6CzDy7v8AvH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsw8u7/vH/AL6ougsw8u6/vH/vqi6CzDy7v+8f++qLoLMPLu/7x/76ougsy1EGEShzlu9JlIkoGFAGbrqSPpFwI8k4BIHpnmnHcmexw9dBzHUeFlkFvKzZ2M/y/lzWNTc2pnRVBqFAHOajrjrM0VswVVOC+Mkn2rzK2Jm5csNEelh8EpRUplS28RTwSjz282LPzZHI+lVRr1E/e1R0VMvhKPuaM6uN1kRXQ5VhkH2r0TxWmnZkcskyvhI9wx1oVhO4zzrj/njRZCuw864/540WQXYedcf88aLILslheR8+Ym3HSgaJaBhQAUAFABQAUAFABQAUAFABQAUAZHij/kVdW/685f8A0E1pQ/ix9UB86ivoy0FAwoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAD0oBnvPw9/5EbTfo//AKG1eBjP40jM6iucAoAKACgDjviJppu9BW7jUl7R95x/cPB/ofwr1MnrKnX5X9r8zxs6w7q0Odbx/LqeTbq+usfG2E3UWCwbqLBYN1FgsG6iwWN7wfpx1PxLaptzFC3nSHsFXn9TgV5+ZVlRw8u70XzPSyvDutiYrotX8j22vjT7kKACgAoAqTSTrIQi/L2+XNCsS7jPOuv7p/75p6Cuw866/un/AL4p6Bdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXYeddf3T/AN8UaBdh511/dP8A3xRoF2HnXX90/wDfFGgXY6KS4aRQy/L3+XFJpDTZcpFBQAUAFABQAhGaAM19A06SXzDBgk5IDED8qfPInkRcjjWJkRFCoowABgCkJE9BYHpQB5vdl7e5lik4dGINeeqFmfU0EpwUo7MptNk4HJPAFdMKJ08lj0jTYnt9NtopPvpGob64rZK2h8lXmp1ZSjs2SSl9/HmYx/CRj9aZixmX/wCmv5rTJDL/APTX81oAMv8A9NfzWgAy/wD01/NaADMn/TX81oAMyf8ATX81oAMyf9NfzWgAzJ
[... base64-encoded JPEG screenshot payload elided ...]",
+          },
+        ],
+        tool_failed: false,
       },
       {
         role: "tool",
-        content: {
-          tool_call_id: "call_W1ae766eqQMvHBnmVvUoUtfw",
-          content: [
-            {
-              m_type: "text",
-              m_content:
-                "opened a new tab: tab_id `6` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `6` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `6` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`",
-            },
-            {
-              m_type: "image/jpeg",
-              m_content:
-                "[... base64-encoded JPEG screenshot payload elided ...]",
-            },
-          ],
-          tool_failed: false,
-        },
+        tool_call_id: "call_W1ae766eqQMvHBnmVvUoUtfw",
+        content: [
+          {
+            m_type: "text",
+            m_content:
+              "opened a new tab: tab_id `6` device `mobile` uri `about:blank`\n\nnavigate_to successful: tab_id `6` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`\nmade a screenshot of tab_id `6` device `mobile` uri `file:///Users/kot/code_aprojects/huddle/index.html`",
+          },
+          {
+            m_type: "image/jpeg",
+            m_content:
"/9j/4AAQSkZJRgABAgAAAQABAAD/wAARCAMfAXEDAREAAhEBAxEB/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDna+nNAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAmtbaa9uora3TfNK21FzjJqZSUVeWwGmulaZAM3uuwbv+ednE05/76+Vf1rL2s5fDH7wuGPDK8FtZk/2gsK/pk0/3++n4i1HJpel6gzRaZfXP2nazJBdQBfMwCSA6sRnAOMjmpdSpDWa09R6mZYWkmo39tZwlRJcSLGhY4GSeM1tOShHmA1fEfhS/8MNbi9kt3+0BinksT93Gc5A9RWNDExrX5VsCdyxpPgnU9Z0VtVtpbVYF3/LI5DHb16DFTUxcKdTkaFzHNqrOMqrH6DNdLaW4wAJOACT6CnsAFSpwwIPoRihNPYByRyOGKRuwX7xVSQPr6Urq9gO703wRp154BfXHnuRdCCWUKrDZlScDGPb1rz54uca6h0J5jga9H1KNnw5oy6r4jstOvBNDFcMckDa2ApPGR7VhXq8lNyjuJs3td8HWGm+M9J0iCa4Nve7d7OQWXLEHBx7Vz0sVOVGVR7oL3RV8d+GLLwzd2UdlJO6zxszeawOCCBxgD1q8JiJ1k+boCdzlEjklJEaO5HUKpOPyrrbS3GNp7gFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBseGONcVu6wXDD6iF6xxHwfNfmDG6Rfabaafex3tibiaWMCFsA7flIxk/d5Ktkc/LjvRVp1JSTg7ICx/aWieTpCf2Uxa3YG7PA80Y5Gc/Nk884x0qPZ1bytL0FqT6fPZzeLTd2MHk2sNvLIV2heVhbLbQSFy3bJxmpkpRo2m7u6/MZQ8K8eKtHH/T1F/OtcQv3UvQHsew+L/B48VtaE3ptvs2/pFv3bse4x0rxsPiXRvZXuRF2JtK0H/hHPCdxpwuPtG1Jn3lNv3gT0yaU6vtaqk0F7s574RAHQL7p/x8j/ANAWujML88fQctzjfAYB+IFkMf8ALSX/ANAau3F/wH8hvY6nxjoy658SdKsGJWOS2BlK8HYrMT+PGK5MNV9nh5S8xJ6Grrni/S/BUsOk2mmb8IGaOIhFRT07ck4rKjhqmITnJgk3qX5b2w1H4e313psQitpbSZhHtxtbB3AgdDnNZqMo11GQupzXw/0bTtO8OS+JdQjV3Ad0Zl3eWi8Egf3iQf0roxlaU6nsojbvoaWiePdM8Sa5b2c2nNBMGLWssjBvmwf++SRn1FZ1cHUpU3JMHGyKni3/AJKj4Z+if+htWmH/AN2mJbDPiLpzav4p8P6erbTcB0Lf3RuXJ/LNGDn7OlOQ47HSzw3Phuxt7Tw3oC3K/wAZMyxgfUnlmNcqaqycqkidzC8c+H4NS8MvrRsRZalAgkkTgkjPzKxHDeoNdGEruFXkvdFJ6nkNez6FBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBPZ3k9hdx3Vu4WWM5UkAjkYIIPUEEjFTOKnFxYHSvZ2cukWGt3lnDDbrHJ5kdsnli5l8whEGOnAJJHQD3rk5pKbpQd9vkIyhrNqvTw/pX4rKf8A2etfYy6zYDZ9dmktpYILKws0mXZIba32My5ztLEk44H1qlQjdNybGO8L/wDI16T/ANfcf/oVGJ/hS9BM7v4tXE8Emk+TNLHkS52OVz930rz8vipc10KJq+BpJJvh3M8jvI3+kfM7Env3NZ4pJYjTyFLcxPhNq1vCt3pUrqk0rLNECcb/AJcED34BrbMacnyzQ5G1pngjTvDXiJdYl1FvLMpS2hdQuHfgDP8AF1wOKwnip1afJYVzO8XaumhfEvSb+UHyUtQsuByEZmBP4dfwrTDUnUw8ore41saHiTwTbeL7qHV7DUkj8yNVZgnmI4HQjBGD2rOhi5UIuDQJ2NB7Cy0v4eX9jYTieGC1mRpAQdz4O7OO+c8VmpynXUpCW5z/AMP9SsdY8LTeGbyQJKFdFXOC8bc5X3BJ/SujGU5QqqrHYclqWdC+H1r4d1u3v73VFmKvttYynl7nIOM88nGeBU1sZOrBxSBy0IvFv/JUPDX0T/0NqrD/AO7TEthnxD1I6R4r8PagF3fZw7lfUblBH5E0YODqUpw7jWxv6gl/4ktLa/8ADPiAW0ZXDrsDK314yrDpisIONJtVY3Fscl45TV9H0aCC58TPdvcZSe3ZFXcvqoAzt7HNdWE9nUqO0LDR5vXqFhQIKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAHmWRoliMjmNSSqFjtBPUgUuVXcrasBlMAoAVWZGDKSGByCDgii1wHyzzT486aSTHTe5bH51MYxWysA5Lm4jj8uOeVEP8KyED8hTcYt3cQIgSp
BUkEcgg4xT9QJp726uSpnup5Sn3TJKzbfpk8UlCC2QaEckskz75ZHkbGMuxJ/M0JJbKwEkN5dWyMkF1PEj/eWORlB+oBpShGTu4oBqzzJEYlmkWM9UDkKfw6UcqvdpARglSGUkEHIIOCKq1+gE817d3DI011PIyfcLysxX6ZPFSqcVeyDQY1xM8gkeaVpF6MzkkfQ0KEUrJaBYSWaWcgzSySEDALsWx+dCjGOyAdBdXFqxa3uJYSepjcrn8jRKEZboBkssk0hklkeSRurOxYn8TTSSVkAymAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQMKBBQAUAFABQAUAFABQAUAVZtSs7eQpLcxq46jOcflWMsRTi7XM5Vopkf9s6f/z9J+R/wqfrVIn6xEP7Z07/AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z0//AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB9YiH9s6f/z9J+R/wo+tUg+sRD+2dP8A+fpPyP8AhR9apB9YiH9s6f8A8/Sfkf8ACj61SD6xEP7Z0/8A5+k/I/4UfWqQfWIh/bOn/wDP0n5H/Cj61SD6xEP7Z07/AJ+k/I/4UfWqQfWIh/bOn/8AP0n5H/Cj61SD6xEP7Z0//n6T8j/hR9apB7eJJBqNncybIrhGb+70P61UK9OTsmVGrFvctVsahQIKACgAoAKACgAoAiunMdrM68MsbEfUCs6r5YOxFR2jc4Iknknk8k141+p5rYlIRreGdAn8T6/baRbzRwyT7j5kgJVQoJPT6UnoXGNz0T/hRGp/9B2y/wC/L1POaeyYf8KI1P8A6Dtl/wB+Xo5w9kw/4URqf/Qdsv8Avy9HOHsmH/CiNT/6Dtl/35ejnD2TD/hRGp/9B2y/78vRzh7Jh/wojU/+g7Zf9+Xo5w9kw/4URqf/AEHbL/vy9HOP2RheLfhbf+E9DbVZtStbmJZVjZI0ZWG7gHmnzXIlTaRwVUZBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQBuab4bOpWSXI1XT4NxI8uYybhg452oR+tS2Wo3K+r6KdJWEm/tLrzCRi3L/Lj13KKaYONjLpkChipDKSGHII7GhNp3Q07HfwuZII3PVlBP4ivcg7xTPTg7xQ+qKCgAoAKACgAoAKAIL7/jxuP+uTfyNZV/gfoZ1fgZwdeMeaFAG14S8QHwt4ltdXFsLjyNwMW/buDKV64OOtJ7Fxdj0/8A4X1F/wBC5J/4GD/4ip5DT2q7B/wvqL/oXJP/AAMH/wARRyB7Vdg/4X1F/wBC5J/4GD/4ijkD2q7B/wAL6i/6FyT/AMDB/wDEUcge1XYP+F9Rf9C5J/4GD/4ijkD2q7B/wvqL/oXJP/Awf/EUcge1XYP+F9Rf9C5J/wCBg/8AiKOQPao53xp8VR4t8PNpKaObUPKkjSNcb/unOANopqNhSqXVjziqMQoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoGdLo/i59J02OzFvdOELHdHqc8I5OfuIcCpsaKSSKuv8AiJtdWBWhnj8ok/vb6W4zn0Dk4/ChIUpJmJVEAelAHfWv/HpD/wBc1/kK9un8CPSp/CiWrLCgAoAKACgAoAKALmlWMOp6vZ2FyGMFzMsMgVsHaxwcHtWOI/hy9CZq6sel/wDCjfBv/PK//wDAs/4V4HOzD2UQ/wCFG+Dv+eV//wCBZ/wo52Hsoh/wo3wd/wA8r/8A8Cz/AIUc7D2UQ/4Ub4O/55X/AP4Fn/CjnYeyiH/CjfB3/PK//wDAs/4Uc7D2UQ/4Ub4O/wCeV/8A+BZ/wo52Hsoh/wAKN8Hf88r/AP8AAs/4Uc7D2UQ/4Ub4O/55X/8A4Fn/AAo52Hsoh/wo3wd/zyv/APwLP+FHOw9lEP8AhRvg7/nlf/8AgWf8KOdh7KIf8KN8Hf8APK//APAs/wCFHOw9lEP+FG+Dv+eV/wD+BZ/wo52Hsoh/wo3wd/zyv/8AwLP+FHOw9lEP+FG+Dv8Anlf/APgWf8KOdh7KIf8ACjfB3/PK/wD/AALP+FHOw9lEP+FG+Dv+eV//AOBZ/wAKOdh7KIf8KN8Hf88r/wD8Cz/hRzsPZRD/AIUb4O/55X//AIFn/CjnYeyiH/CjfB3/ADyv/wDwLP8AhRzsPZRD/hRvg7/nlf8A/gWf8KOdh7KIf8KN8Hf88r//AMCz/hRzsPZRD/hRvg7/AJ5X/wD4Fn/CjnYeyiH/AAo3wd/zyv8A/wACz/hRzsPZRD/hRvg7/nlf/wDgWf8ACjnYeyiH/CjfB3/PK/8A/As/4Uc7D2UQ/wCFG+Dv+eV//wCBZ/wo52Hsoh/wo3wd/wA8r/8A8Cz/AIUc7D2UQ/4Ub4O/55X/AP4Fn/CjnYeyiH/CjfB3/PK//wDAs/4Uc7D2UQ/4Ub4O/wCeV/8A+BZ/wo52Hsoh/wAKN8Hf88r/AP8AAs/4U
c7D2UQ/4Ub4O/55X/8A4Fn/AAo52Hsoh/wo3wd/zyv/APwLP+FHOw9lEP8AhRvg7/nlf/8AgWf8KOdh7KIf8KN8Hf8APK//APAs/wCFHOw9lEP+FG+Dv+eV/wD+BZ/wo52Hsoh/wo3wd/zyv/8AwLP+FHOw9lET/hRvg3/nlf8A/gWf8KOdh7KJ5he20dnf3NrDkRQSvEmTk7VYgZP0FfQ0nemvQ6IqysQVoMKACgAoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp+gpbH0ZXzxAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAh60gPmvV/+Q3qH/X1L/6Ga+ko/wAOPoWtinWgwoAKACgAoAKACgDV8M/8jVpP/X3F/wChCscR/Cn6ClsfRlfPEBQAUAGaAEzQAZoAWgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBD1pAfNer/8AIb1D/r6l/wDQzX0lH+HH0LWxTrQYUAFABQAUAFABQBq+Gf8AkatJ/wCvuL/0IVjiP4U/QUtj6Mr54gKACgDK1DV47RjFGA8o6+i/WuLEYtU3yxV2dVDCyqavYyX129zkOoHoFFcf1ys2d0cDSsWbTxH84W7UBT/Gvb6iuqji29Joxq5fZXpnQq6uoZTkHkEd67k7nmvR2FpgI7BFLNwBQBD9rh/vfpRYV0H2uH+9+lFgug+1w/3v0osF0H2uH+9+lOwXRKkiyDKnIpBcdQMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPWkB816v/AMhvUP8Ar6l/9DNfSUf4cfQtbFOtBhQAUAFABQAUAFAGr4Z/5GrSf+vuL/0IVjiP4U/QUtj6Mr54gKAKt/cfZbGWYdVXj69BWVaXLBs0ow56iicS8pJJJyTyTXi8vM7s+hjBJWRC0lbRgaKJG0laxgWonU+Fr1praS2Y5MJBX/dNd1Hax4uZUVCamup0NbHmjZEEiFT0NAFf7FH6t+dPmZPKg+xR+rfnRzMOVB9ij9W/OjmYcqD7FH6t+dF2HKiaKNYl2qeM55pFD80AGaADNABmgAzQAZoAM0AGaADNABmgBc0AFAEVzcw2kLTTuEQd6EribsRWeo219GzwSZC/eBGCKbTW4JpkVvrFjdXPkRTZftwQG+hpuLSuLmV7C3OsWVpceRLNh++ATt+tJRbBySJLvUbayjWSeTAb7uBnP0oSbG5JCpqFrJZm6WUeSBkse1FnewXVrjLPVLS/LLBJll5KkEHHrQ01uCkmXKQwoAKACgBD1pAfNer/APIb1D/r6l/9DNfSUf4cfQtbFOtBhQAUAFABQAUAFAGr4Z/5GrSf+vuL/wBCFY4j+FP0FLY+jK+eICgDO1qJpNJuAvLBd35HNZVYuUGjowkuWtG5wjSVxRgfSqJGZK1jAtRI2kraMC1E6fwZGzG6n/gO1B9eT/hWqjY8XN5K8YnW1R4wyXb5Tbs7cc460Ayni3/uy09SdAxb/wB2WjUNAxb/AN2WjUNAxb/3ZaNQ0DFv/dlo1DQMW/8Adlo1DQMW/wDdlo1DQMW/92WjUNAxb/3ZaNQ0DFv/AHZaNQ0DFv8A3ZaNQ0DFv/dlo1DQTFv6S0ai0DFv/dlo1HoSx28MoyocD3OKAsiWO3SNty5z7mkOxNQMoavp7alZeSjhXDBlJ6Z96cXZkyVylpWivYxT+fIC0y7MIeg/xqpSuxRjZFWw8PS22oJLLMhjjbcu3OW9PpTc7qxKiri6j4flur95opkCSHLbs5U/1ojOyFKKbLGq6M15b26wSAPAuz5+44/wpRnZjkk0LDouzRZbJph5kjbywHAPGP5UOXvXGkrWGaNo0lhctPPIpbbtVUz+ZpzncUUkbu8VmaXQbxQF0G8UBdBvFAXQbhmgLo+bdX/5Deof9fUv/oZr6Oj/AA4+haasUsVoVdBQAUAFABQAUAFAGr4Z/wCRq0n/AK+4v/QhWOI/hT9BS2PoyvniAoAQjIII4oA4fW9Ans5XmtY2kt2OcLyU9vpWfs1c+gwWOhNKNR2aOeaTHB4PvWkaZ6ys1dFrT9KvdUlCwRMEz80rDCj8e9aWSMMRi6NCOr17HounWEem2UdtEPlTqe5Pc1mfK16sq1Rzl1LdBkNcMUO0gN2JoAh2XX/PVPyp6C1DZdf89U/KjQNQ2XX/AD1T8qNA1DZdf89U/KjQNQ2XX/PVPyo0DUNl1/z1T8qNA1DZdf8APVPyo0DUNl1/z1T8qNA1DZdf89U/KjQNQ2XX/PVPyo0DUNl1/wA9U/KjQNQ2XX/PVPyo0DUNl1/z0T8qNA1Jx05pDFoAKACgAoA80+NHifUPD3ha3j02ZoJ72fymmQ4ZECknB7E8DP1q4JN6mNaVlofOtvc6rf3kVvBc3k1xO4REEzFnYnAHWtbI5k2zpf8AhA/iD/0DNS/8CR/8XS0K5Zh/wgfxB/6Bmpf+BI/+Lo0DlmH/AAgfxB/6Bmpf+BI/+Lo0DlmI/gX4gIjM2m6nhQScXAP/ALNRoFpHK/2hff8AP7c/9/m/xp2RF2J/aF9/z+3P/f5v8aLIOZh/aF9/z+3P/f5v8aLIOZh/aF9/z+3P/f5v8aLIOZj4rzUZpUijurt5HYKqrM2ST0A5osg5mXn0LxIoZ30/UQACWJVvxNPnb6j94y1urhSGWeUHsQ5qlJ9xczR2Ok3L3enRyycvypPrg9a9XDzc4XZ30Zc0dS7W5qFABQAUAFAGr4Z/5GrSf+vuL/0IVjiP4U/QUtj6Mr54gKACgBCKBEbW0LtuaGNm9SoJouWpySsmPCgYAAwKCR1ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAeNftB/wDIC0b/AK+3/wDQK0gc9bY8R0HUV0fxDp2pSRNIlrcxzMinBYKc4FaNaHOnZntP/C89A/6BepflH/8AFVHIb+2Qv/C89A/6Bepf+Q//AIqjkF7VB/wvPQP+gXqX/kP/AOKo5A9qhknxy0IxOF0rUixUgA+WBnH1p8oe1Vjwgkkk46nNUjBiUxBQAUATWsqwXkEroWRJFZlwDkA9MMCPzBFA07M62bxZpUkMiLp0wLKQM2tmOo9ov5VHKauascZzxVGR2Hh//kER/wC83869XB/wzuw/wmma6jcKACgAoAKANXwz/wAjVpP/AF9xf+hCscR/Cn6ClsfRlfPEBQAUAVbzUbTT4vNu50iTsWPX6DvWlOlOo7QVzCviaVCPNUlZGKfHGjb9u6cjP3vK4rs/szEWvY8v/WDBXtd/cbNlqdnqMXmWk6SqOu08j6jqK46lKdN2mrHp4fFUsRHmpSui1mszoFoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAMbXvDOj+Jo4oNYsUu44WLxq5I2t0zwR2pp2JlFPcxP8AhU/gj/oX7f8A7+P/APFU+Zk+yiH/AAqfwR/0
L9v/AN/H/wDiqOZh7KIf8Kn8Ef8AQv2//fx//iqOZh7KIf8ACp/BH/Qv2/8A38f/AOKo5mHsoh/wqfwR/wBC/b/9/H/+Ko5mHsoh/wAKn8Ef9C/b/wDfx/8A4qjmYeyiH/Cp/BH/AEL9v/38f/4qjmYeyiH/AAqfwR/0L9v/AN/H/wDiqOZh7KIf8Kn8Ef8AQv2//fx//iqOZh7KIf8ACp/BH/Qv2/8A38f/AOKo5mHsoh/wqfwR/wBC/b/9/H/+Ko5mHsoh/wAKn8Ef9C/b/wDfx/8A4qjmYeyieaeNtF07w/4jaw0u1W2tVhRxGpJGTnJ5NezgdaRrTjbY5012FhQAUAFABQBq+Gf+Rq0n/r7i/wDQhWOI/hT9BS2PoyvniAoArX15HY2U91L/AKuJC5/CqpwdSaguplXrKlTlUeyVzxzU9XuNVvXurhyWP3V7IPQV9fh8NGhBQivU/OcZiamKqOpN+hT82t+U5OUs2Gp3Gm3cdzbOVkQ9OzD0PtWNfDwrQcZo6cLXnhqiqU3qex6XfJqWm295H92Vd2PQ9x+dfI1qTpVHTfQ/RsNXVelGquqLlZm5DcRtJHtU4OfWgTKv2Sb1H/fVO6Jsw+yTeo/76p3QWYfZJvUf99UXQWYfZJvUf99UXQWZchUpEqt1FSUiTNAwzQAZoAM0AGaADNABmgAzQAZoAM0AFABQAhOKAGjmT8KAH0AJmgBaACgAoAKACgAoAKACgAoAKAPEPid/yOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp+gpbH0ZXzxAUAc7413f8IlfbOwUt9NwzXbljX1qFzzM3TeDml/Wp475lfZcp8Jyh5tLlDlDzaOUOU9c8A7z4UgLZwZJCv03f/rr5LNbfWpW8j7jJVJYSN/M6ivOPWGS7tnyMFPqaAIP9I/56xU9Bah/pH/PWKjQNQ/0j/nrFRoGof6R/z1io0DUP9I/56xUaBqH+kf8APWKjQNQ/0j/nrFRoGof6R/z1io0DUP8ASP8AnrFRoGof6R/z1io0DUP9I/56xUaBqH+kf89YqNA1D/SP+esVGgah/pH/AD1io0DUXFyekiflRoGpJGJgT5jKR2wKQaktAzD8TR3MlpF5Idowx8wJ+n4VcLX1M6l7aC+G47mO0cThgpb92G6gd/wzRO19Ap3tqa88qwQvK33UUk1lJ2VzWMXJqKOZPimRJwXiTys8gdQPrXHDEVJS20PV/s1cu+p0rSHyw6YOcYycV3HkvQZ50n92P/vugVw86T0j/wC+6AuS+Yn94fnQFw81P7w/OgLh5qf3h+dAXDzE/vD86AuHmp/eH50BcVXVjgMCaBjqAPEPid/yOkv/AF7x/wBa9vAfwi4nG12DCgAoAKACgDV8M/8AI1aT/wBfcX/oQrHEfwp+gpbH0ZXzxAUAQXVrHeWstvMu6KVCjj1BFVCThJSW6M6lNVIOEtmeG+INDvPD1+0FwrGEk+TNj5ZB/j6ivtcFi6eJgmn73VHxOMwM8PNprTozI8yu2yOPlNPRNHvdev1tbRDjI8yXHyxj1J/p3rlxWKp4aHNN+iOrC4KeJmowPc7Cyi06xgtIBiKFAi/h3r4epOVSbnLdn3FKlGlBQjsi1UmhFPgxnchcZ6CgGVcR/wDPtJTJDEf/AD7SUAGI/wDn2koAMR/8+0lABiP/AJ9pKADEf/PtJQAYj/59pKADEf8Az7SUAGI/+faSgAxH/wA+0lABiP8A59pKADEf/PtJQABYyQPs8lAWLH2SL+7+ppXY7IlRBGoVRgCgLDqBhQAUAN/5afhQA2aNZY2jcZVgQR7Un5jTcXdHNL4QQXoeS7ZrcHOzbgn2JpRUYo9V5rJ0+VR17nSlAybRwBVHkPUb9nH96gVhPIH96gdhfs/+1QFg+z/7VAWD7P8A7VAWD7P/ALVAWHCFR15oCxIBQMKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQAUAQXNpDdwtDcRRyxN1SRQwP4GnGUoO8XZkSpxmrSV0YZ8CeGzJv/suLPoGbH5ZxXaszxaVlNnI8twzd+U27Wyt7GBYLWCOGJeiRqFH6VxznKb5pu7OuFOMFaKsixUlhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAN/wCWn4UAVtTujY6bc3YTeYYmkC+uBnFXTh7ScYd2Y16jp05VF0R5XF441aO8E7XRdQcmIgbCPTHavppZXRcGktT4qnmuNVVTctG9uh6wHLQK4O3cAemcV8u9HY+4i7xTQzzH/wCev/jg/wAaQw8x/wDnr/46P8aAuS+evvQO4eenvQFw89PegLh56e9AXDz096AuPV9x+6w+ooGOoA8Q+J3/ACOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf+hCscR/Cn6ClsfRlfPEBQAUAFACZoAM0ALQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADf+Wn4UADqrqVYAqRgg9xRdp3E0mrM5eDwFoEGoi7WGQlWDrC0hKKfp/QnFehLNMVKn7NvTv1POjlWGjP2qj/kdOVDDB6V556Inkp6H86BWDyU9P1oHYPJT0P50BYPJT0P50BYPJT0P50BYPJT0P50BYcsaqMYoGOoAKAPEPid/yOkv/XvH/WvbwH8IuJxtdgwoAKACgAoA1fDP/I1aT/19xf8AoQrHEfwp+gpbH0ZXzxAUAFAHO654ttdJcwRr590OqA4CfU/0relQlPXoelg8tqYj3npHucy3jzU9+fJtdv8Ad2n+ea61go9z03k1C1uZnQ6H4xtdTlW2nT7PctwoLZVz6A+vtXPWwk6eq1R5eLy6dDWOqOmBzXKecLQA2SRY13NwKAIvtcPqfyp2FdB9rh9T+VFgug+1w+p/KiwXQfa4fU/lRYLolRw6hl6GkMdQAUAFABQAUAFABQAUAFABQAUAFABQA3/lp+FAFbUpJodNuZIF3TJExQD1xxVQV5pPYumk5pS2PHotTvUvkuIp5TclwQdxJY56e+fSvoVhIcjutLH09f2PI42VrHsrEmAFgVY4yAcYNfOWPlH5EWP9p/8Avs/4UxAOO7/99n/CgLkvnn+6PzP+FIdxfPP90fn/APWoC4eef7o/P/61AXE88/3R+f8A9agLiiZj0j/U/wCFAXJFLE8qAPrmgY6gDxD4nf8AI6S/9e8f9a9vAfwi4nG12DCgAoAKACgDV8M/8jVpP/X3F/6EKxxH8KfoKWx9GV88QFAGfrd+dN0e6u1+/Gny/wC8eB+pq6Ueeaib4Wl7WtGD6nj8kjO7O7FmYkknqTXuRhZWPstIpRWyIy1aqJm5Dd5UggkEcgjtVqC2MpNNWZ6/4b1FtT0K2uZOZCCrn1YHBr5/E0vZVXE+VxNP2dVxRr1gYjJY1lTa3SgCD7HD6t+dF2KyD7HD6t+dO7FZB9jh9W/Oi7CyD7HD6n86LsLInRVjQKp
4HvS1HoOyPagYZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAZHtQAoOaACgCte30FhEJJ3wCcAAZJNNJsTdhLO9gvl82Bty9D2IPuKGmgTuWTUsZkxWGipqRmjgtBeZ+8AN2f8ar63KS9nzfI2ftuTXY1SBj5sY96RiJiP8A2P0oANsf+z+lAC7E/uj8qADYv90flSANi/3R+VABsX+6PyoAUADoMUwFoAKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQBi+KbZ7rw5eRxglwgcAd9pz/StsNJRqpnTgqns8RGTPIy3vX0KgfUuQ0tWqiYuQwtWig3sZOZ614KtXtvDFt5gIaUtLg+hPH6Yr5vHzUsRJo8DFT56rZ0NcZzkVxs8v5wxGf4aBMqf6P/zzlqrMm6D/AEf/AJ5y0WYXQf6P/wA85aLMLoP9H/55y0WYXQf6P/zzloswug/0f/nnLRZhdB/o/wDzzloswug/0f8A55y0WYXQf6P/AM85aLMLoP8AR/8AnnLRZhdB/o//ADzloswug/0f/nnLRZhdB/o//POWlqGgf6P/AM85aBkyW0MihgrDPqaAsSxwJESVzk+9IaRLQMy9a0ttShj8twkkZJG7oQetVGXKTKNw0bTDpsTo7hpHO5iOg9qJS5hRjYu3ayNaSiL/AFhQhfris5q8WkawaUlzbHnge6e7WCOOT7RuwFwcg1zUsLy69T6d+yVNybVrHobg+SA4DHjORnmutHyr8iHav/PNP++KZIbV/wCeaf8AfFAEnmv7f980D1DzX9v++aADzX9v++aADzX9v++aQDlaVhkY/KgZKoYHlgfwoGOoA8Q+J3/I6S/9e8f9a9vAfwi4nG12DCgAoAKACgDV8M/8jVpP/X3F/wChCscR/Cn6ClsfRlfPEBQAhGQc0vMDznxF4KuYp3udKTzYWJYwD7yfT1Fe1hMfCyjV+89XD49W5ZnKNpuoCTYbG639MeS3+Feoq1G1+dHU68N7nSeH/A13dXCT6rGYLZTnyj9+T2PoP1rixeZwjHlo6vucVbFq1oHpiIEUKoAAGAAOgr5/Xqea9XcdQAyQOVwjBW9SKAIdlz/z1X8qNCdQ2XP/AD1X8qegahsuf+eq/lRoGobLn/nqv5UaBqGy5/56r+VGgahsuf8Anqv5UaBqGy5/56r+VGgahsuf+eq/lRoGobLn/nqv5UaBqGy5/wCeq/lRoGobLn/nqv5UaBqGy5/56r+VGgaihLjIzKuPpSGrligYUAFABQAUAN/5afhQAOwRSWIAAySe1HkhNpLUwIvF+izXogWchmO0SFMKT9a7HgMQoc7Wh5cM6wk6nslL/I3mdUXLHArjR6lxn2mL+8PyoC4faYv736GgLk2aBhQAUAFABmgAoAKAPEPid/yOkv8A17x/1r28B/CLicbXYMKACgAoAKANXwz/AMjVpP8A19xf+hCscR/Cn6ClsfRlfPEBQAUAJigAxQAYpWAWmAUAFABikAYoAMUAGKADFABigAxQAYoAMUAGKADFABigApgFABQAUAFABQA3/lp+FAFbU7Vr3Trm1V9jTRMgb0JGM1dOfs5xm+jMa9P2lOVNdUeQweFPEE2pCzewljG7DTn/AFYHqD3r6ueY4VUnNS17Hx0MnxHtFG1tdz2MIywKikkqAM+tfI3u7n2iVko9hm2b/a/z+NAw2zf7X5//AF6ADbL/ALX5/wD16Yahtl/2vz/+vQGobZf9r8//AK9Aahtl/wBr8/8A69AajhHIRy5HtSHYlVNv8TH6mgY6gDxD4nf8jpL/ANe8f9a9vAfwi4nG12DCgAoAKACgDV8M/wDI1aT/ANfcX/oQrHEfwp+gpbH0ZXzxAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFADf+Wn4UAMuJkt4XmkbbHGpZj6AUJXaS3GouTUVuzkI/iBateBJLR0tyceaXyQPUj/69dv1CfLdbnqzympGF+bXsdh5nybgCwPTbXDbU8jbQb5x/54v+VOwrh5x/54v+VA7kuaAF4oGHFABxQAZoAKACgDxD4nf8jpL/ANe8f9a9vAfwi4nG12DCgAoAKACgDV8NceKdJ/6+4v8A0IVjiP4UhPY+jK+eICgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAb/AMtPwoAivLZLy0mtpM7JUKHHoRTjLlakVCThJSXQ88j+H2oNfBJriD7KDzIpO5h7DHBr2P7SpqndL3j1KmZRlHRanopiAiCLgAAAfSvG1e55L1I/Ib/Z/L/61BNg8hv9n8v/AK1AWDyG9vy/+tQFg8hvb8v/AK1MLB5De35f/WoCweQ3t+X/ANakFhy24x8x59gP8KB2JVjVegANAx1AHiHxO/5HSX/r3j/rXt4D+EXE42uwYUAFABQAUAPileGZJYmKyRsHVh2IOQaTV00wZ6vpvxZsDaINSs7hLkDDGABlY+oyQR9K8meXz5vd2I5WXf8Aha+gf88L/wD79L/8VU/2fV8gsH/C19A/54X/AP36X/4qj+z6vkFg/wCFr6B/zwv/APv0v/xVH9n1fILB/wALX0D/AJ4X/wD36X/4qj+z6vkFg/4WvoH/ADwv/wDv0v8A8VR/Z9XyCwf8LX0D/nhf/wDfpf8A4qj+z6vkFg/4WvoH/PC//wC/S/8AxVH9n1fILB/wtfQP+eF//wB+l/8AiqP7Pq+QWD/ha+gf88L/AP79L/8AFUf2fV8gsH/C19A/54X/AP36X/4qj+z6vkFg/wCFr6B/zwv/APv0v/xVH9n1fILB/wALX0D/AJ4X/wD36X/4qj+z6vkFg/4WvoH/ADwv/wDv0v8A8VR/Z9XyCwf8LX0D/nhf/wDfpf8A4qj+z6vkFg/4WvoH/PC//wC/S/8AxVH9n1fILB/wtfQP+eF//wB+l/8AiqP7Pq+QWD/ha+gf88L/AP79L/8AFUf2fV8gsH/C19A/54X/AP36X/4qj+z6vkFg/wCFr6B/zwv/APv0v/xVH9n1fILB/wALX0D/AJ4X/wD36X/4qj+z6vkFg/4WvoH/ADwv/wDv0v8A8VR/Z9XyCwf8LX0D/nhf/wDfpf8A4qj+z6vkFg/4WvoH/PC//wC/S/8AxVH9n1fILB/wtfQP+eF//wB+l/8AiqP7Pq+QWD/ha+gf88L/AP79L/8AFUf2fV8gsH/C19A/54X/AP36X/4qj+z6vkFjW8PeMtO8S3s0FlHcq8MYdvNQAYJxxgmsK2HnRSchG/PMsELyt91FLGuaTsmxxi5SUV1OZ/4SmRZwzxp5WeVHUD61x069SUtVoev/AGYuXR6nTGQ+WHTbzgjJxXcePawzzpPSL/vqixNw82T/AKZf99UWDmJfMT+8KLDTDzE/vCgYeYn94UAHmJ/eFAB5i/3hQK4qurHAIJoGOoA8Q+J3/I6S/wDXvH/WvbwH8IuJxtdgwoAKACgAoA
KACgAzQAZoAM0AGaADNABmgAzQAZoAM0AGaADNABmgAzQAZoAM0AGaADNABmgAzQAZoAM0AGaADNABmgAzQAZoA9C+Ef8AyHNR/wCvZf8A0OvNzH4UTI9blRZY2RxlWBBHqK8n1Em07o5xPCMIuxI907wA58vbyfYmlGMUtD03mk3T5FHXudIUDJt6D2qjyxnkD+8aBWDyB/eNAcoeQP7xoCwfZx/eNAWD7OP7xoCweQP7xoCw4QqBzyfWgLElAwoA8Q+J3/I6S/8AXvH/AFr28B/CLicbXYMKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD0L4R/8hzUf+vZf/Q683Mfhj6kyPVNSujY6bc3QXcYYmfb64Ga8yjDnqKHdnPiKjp0pTXRHlMPjXVo70XD3bON2WiP3CPTFfUzyyj7Nrlt5nxMMzxirKbnfy6HrZkLQhwSuQD06V8o1bQ+6Urq5F5j/APPY/wDfIpg2KJH/AOex/wC+RSBMl89fegdw89fegLh56+9AXDz196AuHnr70Bcerbj90j6igY6gDxD4nf8AI6S/9e8f9a9vAfwi4nG12DCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA9C+Ef8AyHNR/wCvZf8A0OvNzH4Y+pMj11kDqVYZBGCD0NeSiGrqzObh8B6BBqIvUtn3BtyxM5Man/d/pXoSzPEyp+zctPxOCOV4aNTnUf8AI6QoCMHNcB32G+Snv/30aAsHkp7/APfRoCweSnv/AN9GgLB5Ke//AH0aAsHkp7/99GgLB5Ke/wD30aAsOCADAoCw6gYUAeIfE7/kdJf+veP+te3gP4RcTja7BhQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAehfCQga7qAJ5NsuP++683MvgRMj1+vKJCgAoAKACgAoAKACgAoAKACgApAeH/E1g3jSXBziCLP5GvcwH8IuJx1dhQUCCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgC7pWq3mi6hHfWMvlzJxyMhgeoI7is6lONSPLITVzsx8W9ZAGbCwJ9fnH9a4v7Oh3Fyh/wtzWP+gfY/wDj/wDjR/Z0P5mHKH/C3NY/6B9j/wCP/wCNH9nQ/mYcof8AC3NY/wCgfY/+P/40f2dD+Zhyh/wtzWP+gfY/+P8A+NH9nQ/mYcof8Lc1j/oH2P8A4/8A40f2dD+Zhyh/wtzWP+gfY/8Aj/8AjR/Z0P5mHKH/AAtzWP8AoH2P/j/+NH9nQ/mYcof8Lc1j/oH2P/j/APjR/Z0P5mHKH/C3NY/6B9j/AOP/AONH9nQ/mYcof8Lc1j/oH2P/AI//AI0f2dD+ZhyjZPi1rTIQtjYqSOGw5x+GaFl0L7j5TiLy8uNQvJbu6lMs8rbnc9zXfCCgrRGlYgqgCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKBhTuIKLsAouwCi7AKLsAouwCi7AKLsAouwCi7AKLsApAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAC4o1AMUAJQAUALRqAUwEpALin8gDFIBMUALigBKACgAoAKAFxQAlABQAUAFABQAUAFABQAUAFABQAtHoAlABQAUAKBmgBKNQCgAoAKACgAoAKACgAoAKACgAoAKACgAoA9G8FaVocvgzUNV1XTY7o2sshJIy21VU4HP1ry8XOoqqhF2E9zQ0S18E+LZLiys9EltpUj3mTG0gZxwwY8+xqKjxFCzcrid0cHB4X1O9m1EWEP2iKwlZJX3qvTPOCfQdq9D6xCKjzbsq5Bpnh/U9Ytbi5sbfzYbcZlbeq7eM9zzwKdSvCm7SerC5vfY7b/hWJvP7FPn7+NQyn/PTHru6cdK5+Z/WuVS07fIXUt+MtF03TvCOhXdpZxw3FwqGV1zl8x55/GpwtSUqslJ6AnqZ1l4C8QMLa7m00/ZzIjPGXG/ZkZyvXp+NaTxlLVJg2T/EfSbDR9ctYNPtUt4ntt7KmcE7iM/kKWBqSnBuTvqEdSp4a1TwxYWMya5pL3k5k3I6qDhcDjlh3zTr0q0pXpuyB3O58QWvgvw5b2k13oCut1nYIlyRgA85YetcVF4iq2oy2JVzKsfD+k674Q1i/wBM0kG5e4kWzHR0Hy7R1x3NXKtOlVjGctOo72ZyWseDtb0O1F1e2gWDIBeOQOFJ6Zx0rupYmlUlyxepVxdJ8Ga7rVqLqzsx5B+7JK4QP9M9aKmKpU3yt6iuZmpaXe6ReNaX9u0EwGdrc5HqCOCPpWtOpGouaDGjU8HeHR4l1wWsjMltGnmzFeu3OAB7k1jiq/sYXW7E3Y7O41HwDY6odFfRo2RH8qS58sEK2cHLE7jg9TXCqeKlH2lxanK+L/DdvpeuQQaQ/wBpgux+5iRxIytnBTjr1GP/AK1dmGxDnBuppYaegrfDrxMtt532FDxnyxMpf8vX8aX16je1w5kZOl+HdV1mS4jsbRpZLf8A1qlgpXqMYJHPBrapXp07cz3Hcvz+BPElvZrdPprFWIGxHDOM8DKjms/rlFu1xXRFqvg7XNGsReXtlsgyAzJIH2E9N2OlVTxVOpLljuNNDrTwT4hvre2uLfTy0FyoaOTzFxjGcnnj8aTxdGLab1QNq5KPAPiQ37Wf9n/OFDmTzF8vH+90/DrS+uUeW9xXRQm8NatBrUekS2hW9l/1aFhhxzyGzjHBrRYim4Oaeg7jG8P6mmuDRWtsagSAIt69xu65x0pqvD2ftL6Bcni8Ja3Pq0+lx2W68t0DyR+YvCnGDnOO4qHiaSgp30YXLbeAfEqWRuzpx2AFjH5i+Zgf7Oan67Q5rJiui54fsrabwVrNxLopupYg+27yn7nCA9yDx14BrOvJqvFc1loDNCL4eSSeCzd/ZJv7aJ3LH5y7Sm7g46fd96zeNtW5W/dC+pyepeHNV0iygvL218u3nIEbh1YHIyOh44rsp4inUfLFjuJeeHtU0/S7fUrq28u0uNvlOXXLZGRxnPSiNenOfInqBreA/wCyLjXP7P1eyhnS6G2F5M/JIOg+h6fXFY41VFDng9gkbth4CQfEG4tJ4d+lQr9pUN0dW4VPwOf++awni/8AZ018WxN9DF1HRT4j8S3Vv4X0yNbO2xGXQ7UJGcsST3OcewranVVGmnVerGvMztZ8I61oMAnvrQCEnHmRuHUH0OOlbUsTTqvli9R3uVrrw/qdnpFvqs9tss7jHlSb1O7IJHAOR0qo14Sm4J6oBbjw7qlrpVtqUttttLoqsUm9TuLdO
M5FKNenKTgnqguaDeAvEcbSCTTwgjjMjM0q7cDPcHrweKz+u0dLMLo5vOQDXUAUAFABQAUAFABQAUAFAHq3w/upLH4e6rdQwiaSGaV1jIJDkIvHFeRjY81dImW5p+E/FWpa9qE1neaJ9khERYzRh1APTByByc8Y9KyxFCNJJqVxNWKXg6ySy/4TCxt2aRYp2jTJyx+RsfU1eIk5OnJ9h9ih8N4JY/CevO8bqrqQpZcZIjOf51eMlF1I2YPchX/khn/bQf8Ao4Vov99X9dB/aNrVo4pbDwLHOAY2ngyCMg/uuB+eKwptp1WvP8ye5T8U6rr1t8RNOtrOS4W3byvLiTOyQE/PkdD3+mKdCnSeHk3uNWsZHxZ/5GSz/wCvQf8AobVvl3wMcTgG+630NeiUen/FT/kFaD/wP/0Ba8vL/jmREd4Xu5rH4S6rc20hjmjeYo46qflGRSxEVLFRTB6sSxvLm++DurSXlxJM6eageRizYBU9T9aU4KGKiooNmb3ia40zT9H0pLi+1SytsAQtpwxkhRgMcenQVhRjOU5cqTfmI5T4k6hBqNtpjLaX0MqFx5l1bGLeuB0J684P4114GLjKSuioifCa4jj1u+gYgPJbqyep2tz/ADp5knyxfYUjm9T8P6mvii400WkrTy3DbMIcMrNkNn0wetdFOtD2SlfYaZ1/hbwqPDfjy1t7y4tppntJJYxECNpyBnnvjd+tceIxHtqLaVlcTegtnqevN8WJbV5rk2/nurQknyxCFODjpjGDn1olCl9VT6hpY6XRUhj8e+JvIwMxW7OB/f2nP9K56l3QhfzF0RjeBNY1G88O69cXV5NNLCzPG0jbtp2E8Z7Z7VriqUI1IKK3B7kGiXt1qXwl1mW+uJLmRRMoeVtxxtU9T7mqqQjDFRUdNh9Rdf1C7074T6JLZXMtvIywqXiYq2NhOMj3ApUacZ4mSkr7hbUm8d6zqNl4f0Ca1vJYZJmV5GRtpchAecdsnpRhaUJTmmtgSNDxJtHxC8KNgZPmjP4VnRX+z1PkJbGLcW0zfGyJ1icoNshbbwF8ojOfTNbRlFYNq+v/AAR3XKbek/8AJWNe/wCvOL/2WsKn+6w9WLoZngLWNR1HxfrUV3eTTRAMyo7ZVSJMDA7cccVri6cIUYOKG1oQeHwB4C8XjHHnXI/8doqv97T+QPcbDe3n/CmpLgXM/nrKVEgkO4L5uMZ64xxVSjFYy3QNLkmiwnxj8MzpeQbqzlWNcnsGBB/75JH4Uqz+r4nnWzDZmV8UdQRtUs9HgOIbGEEqP7zDj8lA/OtsvjZOo92NHCIzI6ujFXUgqw6gjoa77J6FHsur+I7s/DBNWQBLu6hSNmH8JY7Sw/X868WlRTxPJ0RmlqZOhPNZ/B+6n0sst5mQu0XLD5wCfqErSslLFpT2B7kvhW4u9R+Hutf2xJJLbhZBFJOSTtCZPJ6gN0oxCjHER9mPqrFTxEryfCLQmVS23yS2BnHysP51WHajipXHsyfxHDJB8NPDkUqFJFmtgysMEHBqaD/fza8xLck+KGvalptxZWVldPBFNE7S7MZfnGCfTGaeAowneUlsEUeU9K9YoKACgAoAKACgAoAKACgDo9A8a6p4csXs7FLYxPIZD5sZY5IA7Eelc1XCQqy5pXFa5oXPxP8AEVxA0ataQlhjfFCdw+mSazjgKSetw5TG0DxRqPh28mubRkk8/wD1qTAkPznJ755PPvW1fDwqpJ9AsbNx8TdduFnjZLMRTIU2CI/KCCDg5znnvWKy+krPW4cpijxLfDwv/wAI9tg+xZznYd/3t3XOOvtW31ePtva3GP1TxVqOrabY2M/kpHZbfJaJSrAhcAk5pU8NCnJy3uKxtr8UdeFisHl2hmAx9oKHcffGcZrF5fTve/yDlOe1/wAQ3viS8jur5YVkjj8tREpUYyT3J9a6KFCNFNJgjJxkEVsM3Nd8U6h4igtYb1YAtrny/KQqeQBzkn0rCjh40m2uothtr4nv7Tw5c6FGsH2S4LFyyHfzjODn29KJYeLqKo3qgC28T39r4cuNCjWD7JcFi5KHfzjODn29KJYeDqKpfVAaej/EPWNIsUsylvdwRgCMTg5QDoAR1A96yqYKnUlzJ2Cxj694h1DxFeC5vnX5BtjjQYVB7D+tbUaEaKtEaVijZXtxp95Fd2krRTxNuR16g1rKCmnFhY7VfivrQtwjWlk0mMeZhh+OM4rg/s6nf4hcqOVk13UpdbGsNdN9vDhxKO2OMAdMY4xXYqEFT9mloOx1LfFXWjblBa2Ky7cecFbP1xnFciy6F9xcqMPR/F+q6Ld3t1C0U094QZnnUsSeeeCPWt6mFhUSWyQWI9I8UX+iWF7Z2iwGK8z5nmISeV28cjHBoqYaNSSk3sFgsfFF/p/h650SFYDaXO7eWQl/mABwc+3pTlhozqKpfYLCX/ie/wBR8P2uizrALW22+WVQh/lBAyc+h9KIYaMZupHqOwuseKL/AFyysrS6WAR2f+rMaEE8Ac8nsKKWGjTcnF7iJdW8Yarq9/ZXsxhiuLI5haFCMHIPOSc9KmnhYQi4rW4WNiT4p686xhYbJGU5YiMnf7cngfSsll9Pa7DlMy38catba/dayiWv2q5jWOQGM7cDGMDPt61pLCU3BQbegWKmi+J7/QdSub+0WAzXAIcSISOW3cYI71dXDxqQUX0C1x9p4r1Cy0rUNOiWDyL9naYshLZYYODnilPCwclLXQLFnQ/HGqaFpjadDFbTW5LMomQkrnr0NTUwkKsudvULHVfDe1bSdNu9dvL2CPT54zmMnDAox5P64x61x42SnJU4p3Qpa6Hneq6hJqurXd/JndcSs+D2B6D8BgV6VKHJBRKKdaAbk/inULjw1FoLrB9ji27SEO/g5HOf6Vzxw0I1Oe+orDvDni3U/DLSCzMckEhy8MoJUn1GOQaK+HhV+LRjtct69491bX7I2TpBbWzY3pAD8+OxJ7e1RRwUKcua92K1h2ieP9X0PTVsIUt54Uz5fnKSU5zjgjIoq4OFSfM73C1yvq/jbV9csYbS9+zlIplmDJHtYsM4zzjHNVTwkKb5lcLWKviDxJfeJbiGe+WEPChRfKUqME55yTV0MOqN1EdrGNWwBQAUAFABQAUAFABQAUAFAwoEFABQAUAFABQAUASQRGe4iiBAMjhAT2ycUpOyuB3p+Eupjg6pYj/gL15/9ow/lZPMc/4m8JXPhf7L9ouoJ/tG7HlA8bcdc/WunD4n2zdlsUmc8CD0NdOiAWlcBOvegdwJA6nFHkIWi4G/4Y8KT+KHuUt7uCB4ApKyqTuBzyMfSubEYn2DV1cT0F8O+EbzxHeXltDNFA1pjzDKCeckY4+horYpUUnvcbdhNP8ACV7qPia50NJY0mty++RgduFIGfXnIpzxMY01Va3FexbHgiY2Gr3X9pWxGmSPG6hT+8KKCcfnj8Kj62uaK5XqHMUrvwvcWfhS28QNcRNBcMAsQB3DOep6dquOJi6rppbDvqHiTwtc+GhZm4uYZvtSll8sEbcY65+tFDEqtey2C9yr4f0S
bxDqy6fBNHE7Iz7pASOPpV16qpR57A9CvqunvpOq3VhI6yPbyFGZRwT7VVOp7SCkC2KdaDAEHoc0LyELS6gFHoAmRnGRn0oeoCk8Y7UaAJQAUAFABQAUAFABQMKBBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAWNP/AOQlaf8AXeP/ANCFRU+Bgex+NtD0jVr20fUtdXTnSNgiFlG8Z68142Gq1IJqMbkJnE22jaFZ+M7O0F1LrVmYTJthTzC0nOFIU9O5/Wu2VWq6LlblZXQ9Bt9Gt9X+1Wuo+F7Wzsh8tvJlPMYeuFHynv1rz3VlCzjO7Juct4T0/RofBGq32padDd/ZLmX5nQF2VAuBntz/ADrpxM5urGMXa6Q2TXo0nxT8Pb7VotHgsbi037PLABBTB6gDIIPQ0o+0oYhQbuGzNHRdFgsvCWn3WjaRYalcTIrztcsAWyOcEg8g8Y4xWdWq5VWqkmkK5xHj+3sINXhNnpc+nSshM0UkYVGOeGXBIPcHHpXfgpScGpSuUhPhzqP2DxhboThLpWgP1PK/qB+dGOhzUvQJHfxRp4RXxBqTABbnUotn+6xTP/obflXnNutyw7Incsx2CaL4j8Sa/IuIjbRup7HCkt+qrS5/aQhTXcL9DkPDVna6h8PvEGoXVrDLd7pnEzoCynYG4Pbkmuus3CvCKfYb3F1v/kjGk/8AXSP+b0of75IFuO+K33ND/wCuL/8AstPLvtDRjfDP/kdIf+uEv8hW+P8A4PzCWxmeMv8AkctX/wCvk/yFaYb+BEa2Ok8B6NpqaNqPiPVLdbiO03CONhuA2rljjoTyAM1zYyrJ1FShoS9zZ01tG+IWl39v/Y8Nhd24BjkjAyuc4OQB3GCKxqRq4Sabd0w2Zk/2fY6x8Knu4LKBNRsDiV44wGYoeckcnKnNac8qeKSb0f6hfU0NQ8PadBpvhvw+baFL6+dPtE4jHmBFG5/m68niojVm5Tq9EFzozpNtBfRaVD4St5NKKgPdkxnBI/un5j7nrXL7Rtc7nqK55P4x0aLQfEtzZW+Rb4WSIE5IVh0/A5FexharqU03uWtjBroAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAmtJFivbeRzhUlRmPsGBNTNXi0B1vxE1/Tdf1Cyl02czJFEyuSjLgls9wK5MDSnSTUkKKsVPAet2Wg+ITcX+VhlhMXmbc7CSDkgduMVeMoyq07R3BnZ6b4k8LaLrN5dNrt5eyXfzGSRWdIxnIQYHv6dBXBOjWqQS5bWJszn7HxDpVr4H13Smuybq5nmaECNsOrYwc446d66J0Kkq0ZW2SKtqR6L4h0y0+HWq6RNcFb24MvlxiNjncABzjHaqrUpyxKqW00B7mlpeqeF5tJtvI1Wfw9fRgecYMgSHGDkYKsD1rKrTrqo21zIRmfEHxNYa61jbWDtOlruL3DLt3kgDj8sn3rXBUJ07uWlwijjrW4ktLuG5iOJIXWRfqDmu2ceaLiUegeP8Axjpuu6JbWemzs7mYSSgxsu3CnAyRzyf0rzsHhp06jciUrE/ibxzp+peCVsbW4Zr6dI0nQxsNo4L8kYPIx+NTQwk41uZrRBbUy/DniLTLDwHrGmXNwUu7nzPKTy2O7KADkDA5FbV6U5YiM0tBtakeqa/ptz8NNP0eKctfQuhePYwAALZ5xjuKUKNT6y5taMLai+P/ABBpuurpQ0+cy+RGyyZjZcE7fUexqsFSnTcuZAjN8D6rZ6N4mivL+UxQLFIpYKW5I44FaYynKpT5Y7gzqNQl+HGp6hPe3N7dmad977RKBn2G2uSCxcIqKWiFqQ6F4l8O6Zc6rojtI2g3ZzDKwY4ygDBuM4Pr2xTq4etOKq/aG7lmDW/Cvg3Sb0aFeyX17cjCk5OCAcZOAABkn1NS6dbETXOrJCs2YngDxNZ6HPfW+qSEWVygJJQuN49QPUE/lXRjcPKaXJugaG674vW48eQazaZltbMqsKkFdyj73XpnLfpRRwv7hxlux20OlutX8GavfLq9zrV9Cdg8yyEkiBiBgcL3+h5xXLGliIR5FH5iszzrXLy1v9XnnsopIrUkLEskjO20cZJJJ564zxXp0YShBKW5RnVqAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAZoGFABQIM0AFABQAUAFAwoEFAwoEFABQMKACncApCCgAzQMKBBQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHU6z4UvFXT5NI0q8mhmsIpZHjRnBkIy3P5cVy0sRH3vaSs7sVxviTw19ivb97KMR2tlFbmVXc7g0gHTPXnP0ow+I5lFS3dwTKUHhjUbiS3VfIVJrQXnmvJtSOLOMue1U8TBRfrYdzTvfCEgstFhs/ImvLoTvJPHPuiZFIw27oAB1rKGKTcpS0SsK5c03wlZhdGW7a3uvtmovE0trOWR4hHnGR0IYH3qJ4qbcraWXbzC5hWPhe9v7eKdZrO3W4dktkuZwjTkHGEHfnjPrW88TCOn3juZsOn3E2qR6dsCXLzCHbIcbXzjB9Oa2lUioc/QDWfwhfx3sts9zYL5EfmXMpuB5duM4AduzHsOtYrGQavZiuZuqaVcaRcJFcNEyyoJIpYnDJKp7qe9a0qsKiuguaVp4euNUstKSztolnuvtDCV5/9aEI4xj5cfrmsXXUJScnorBcmXwRfMsMi6hpRgmOyKYXY2vJnHljjlv0pPGQ7O4XKlt4Zu5RO1zcWdikM5ti13NsDSjqo4Ofr0q54iK2Td1fQLixeFdQa4vYp3tbRbOQRTTXMwSMOeig9yetOWKgopx1v94XJ5fDV1p9vqcV5bQyTwQQyrIlxxEHfAIAGGz09utR9ZjKUXF6NvoFxL7wZqdhHdmWayeW0TzZreK4DSLH/f246URxdOTW9mFyNPCWovbCQSWguGh89bIzgTtHjO4J9Ocdar61TUttNrhcSLwpfTWUU4ms1mmhNxFaPMBNJH13BfoD3pPFQUuW17aXATwposGva0LO4nEUXlO5O8KxIU4xkHPPJ9garEVXThzJDbNR/B4udH0iW1urCKe4EqO8tzhbiQPhRH68fh0rBYvllK6dvyFcybTwxe3CyvNLaWUccxt995MIw0o6qvqf0raeJhFqyuFyjLpl1b6sdMuFENyJREwc8KxOBz6cjmtVVThzrYLlybwzqUFnqN08aeXp8/2efDZO7IHHHI5H51msRBuMe+oXJz4SvYprpLu6sbSO1ZElmnn2oHZdwQHHLY6+lT9
ajZNJu4XK994c1DT4L2WcRYs5UjmCPuI3jKsPVSO9VHEQnZLr+gXK99pNxp13Ba3LRLLNHHJjd9wP03eh71cKsZxckO5bn8Lanbw6tK8abNLcJcYb1/u8cjBB/Gs1iYNx/vBcePCl+J5o55bS2jgSN5p55tiR7xlVJx97HYUniobpN/8AAFcztU0y50i7e2uQm8IHVkYMrqRkMpHUGtadSNSPNEZ02seC3+1gaZJaDdaxzJaNcfvpPkBchT754/KuWniklaae+4rmCmhXj3OlwDyt+por2/zcYJIG7jjpXR7eNpS6RHc3NJ8PW8wsFvbMASJe7pVuCfMaIcfL/Dg/nXPVryTfK+xNyLw34QmvrzS5L57VILoiQWz3GyaWLuyr1xTrYtKMlFfMd9DmLhBHczIv3VkZR9ASK7FqrjI6YBQAUAFABQAUAFABQAUAFABQAUAFABQwOh8Qa6bttPGn3lwqQ2EULhWZAHUEHjv9a5aNBLm51q2wsbN5rukarcaxayXzW8N9b2oS5eFmAeIDIYdefWsIUatNRklqr/iKw6fW9Cmi/shL2VbOXS47P7W8BykiOWBK9dpz2oVCqv3ltb3sFmFvrmh2NpYaUt9LPB9lurW4uVgYbDKQQyqeSMj8qJUas5Opy21TsFmR6dquh6IujW8epNdC21F7meVbd1UAxlRtB5Pb9ac6dapzNxtdfqFmSab4isJNL0yOXUILJ7FSkqS6eJ2kUNuBjYg4Pse/NTUw8+aVo7+YrHO2+rRP4zi1adnEP24TuzDLbd2eQO+PSupwaoOHWxXQ1tG1+0hn123kuI7dL+fzoLma2EyKQ7EB0IPBB/A1jUoytDS9l6CaG6p4smtr23/su8iuPJt/JeVrNEjYltx2IV+VfrzRSwqaftFYLElj4ks0ttONxMRPFHf+dtiIAaYfLjHqfTpSlQleVl1iFjNt9VtI9I8P27O3mWd880w2n5UJQgj16HpWsqc3Ob7oLG6muaM76hcw30VncyahLOZpbHz3liJ+UR5GFP1xXN7GrorXVu9hWJdWudO8QWmqhbqeOye+juku0tHkUMYtpjZRyDxwelEFOjKN1rba/wCIbCeIr+y06TUtPLyh5NNsYoVeMhvkbcQ3907cU6MZyjGa6N/iFjOn1/T5PFPiK+Er/Z72zmhgbyzlmZVABHUdDWqoz9lCFtmO2hrN4usZJV1UajFC4twps109TOJQm3AlIPy+/pxWCw1RPk5evfQVirYa3pA0m2jv9RS6s47YpJp91aeZMsmOkUgAwucEZPAqpUainZKzvv0HY53wnqNtpfiK3urxykASRHcKW27kK5wOvJrrxEJTpNRWugFybVLCNfDEMdyZU0yRvOcRMOPODAgHrkDNZqnO9RyXxf5BY2/+En06+juIBqFvZbL+edJLnTxOssUjZ4BBKsP1rneHmrO17pdbCscl4h1FdV167vYpJWR2AR5AAxAAAJAAA6V20KfJTUZFHZjxno899ZpcBxZXFu76iPLPM5Cdu/MY6etcP1Spytrfp6E2MzTtc0+eHUbqe7t7LU571pzPPZ/aMxEcKg6Bga0qUZxaSV1bv1Bo1LS+0/W/Gl8EkkuNJ1GyX7UxjKeSY1BBbtkbD04+as5QnSoq+kk9PmD2OG1rUW1jWby/bIE8hZR/dXoo/AAV6FKmoQUBrY7SPxlpUrabFc7/ACLmF11bCH5n8tYwenP3c8etec8JNKTXTYLFWz8V294NXhuLmCzkur37VDNcWgnjxjbtZcHB2gYNazw8o8rSvZd7BYwPFOpxarqKG3naeKC3WBZDEsQbGc7UAG1cngV0Yam4R95WBHRvq+gJr1t4iTU5HmtrdFFn9nYM8ix7RhugXnn6VyqnW5HS5d3uIh07VNDkk8PahfajJbzaWgjktlt2YuQxIYMOMc896upSqrnhGN1IdmOtPEmlxR6erzOPJ/tDf+7bjzSdn5/pSnh5tydv5RWEsdU0KbVNF1y71J7aayhiimtBAzEsgKgqRxtOc0pU60YSpqN0+odDi7h1kuZnXlWkZh9CSa9CKaSTKI6YBQAUAFABQAUAFABQAUAFABQBMbW5W2FybeYQE4EpjO0n69KlTi3ZPULgbW4W2Fw1vMIGOBKUIUn2PShTjflT1C4v2O68ppfs0/lrjc/lNgZ6c470c0b2bQXEe0uY5RE9vMkhG4I0ZBI9cYp88WuZW+8Lj/7Pvd6p9jud7LvVfJbJX1HHT3qfax7oehCY3CbyjBM7d2DjPpn1qrq9kxD1tLl32LbzM+QNojYnJ6DGKXPDqx3JoLAyJeea7QzW6BhC0TFpGzjbwOD9amc0refmK5bvdAn02W8hvZlimt4klRQjES7scA44xnnPfiohXUkuXqFzNa2nWBZ2glELHCyFCFJ9j0rZTi3ZMLim1uFt1uDbyiBjgSmMhT+OMUueLdr6hchqhmxeeH7iH+zGtXF5FqKjyHjUjL5wUIPRgawjiIvmvpb8hXI9T0Sax1G5tLctffZcCaW3iYojdx+HTNOFdSipS0C5m7HEYk2NsJwGxwT6Zra6u1cC3aX+paTM4tLq5s5HwrhGKE+mRWc4QnG8lewFrxBpF9puq3aXLTXPlyAPdlG2uxAP3j359amjVhOKtZBczhaXJtjci3mMA6y+Wdo/HpWjnDm5bgNMEokWMxSB2wVTacnPTA70+ZWbvsA5bW4eJ5VgmaOPh3CEhfqe1JzirLm3C5F1pt9wJZ7S5tdv2i3mh3DK+ZGVz9MilGcZbMLjPLcRiTYwjJxuxxn0z61V03ygSR2d1NKIo7aZ5Cu4IsZJI9cY6e9R7SNrtgSQ2Ye2vJZJvKktwuImjbLknBGf4cdeaPaXcUle4XIntLiKBJ3t5khf7sjRkK30PQ1XOm7X1C5NHJqNnYyCNrqC0usK+NypLjoCehqGqUnrugKZrSwGrqeg3WneSwV543to7hpEibagcZAJrGFeM7rrsFzN8qT5P3b/ALz7nyn5u3Hr+Fa3QXLj6PeR6OupvERbmcwcqQwYDOSMdO2fXis1Xg58gXK0FrcXTMtvBLMVGWEaFsD1OKuU4x+Jj2CC1uLmQxQQSyuBkrGhYj8BScoRV29AuREFSQQQQcEEdDVrXURpaTolzqtwI1DxRmORxM0ZKHYpbGfwrGrXhBd/ILlGO1uJLY3KW8zQL96QRkqPqelaOcVK1wuEVtPOjvFBLIkYy7IhYKPcjpQ5RWjdguLDaXNwjvBbzSqgy7Rxlgv1x0odSMXaTsFyGqAKACgAoAKACgAoAKAHJs8xfMzs3Ddj0zzSd7aAeja1/bJvtVuPtEaeGmtlWPed0Dw4XCxj+/1x6GvMp+zcVG3v3+ZJNef2qmsazc3sjHw01lIIvnHkPGUxEqDpuzjpz1qY8jhBRXv3AdDq19H4lsLRbuQW0ehBxEG+TeIickdCcgflR7KLpuTWvN+odCDw3f3N2nhm8u7h57lZr4ebK25sCLIBJ7Zp1oKLnGK00BmdF4i1dvCemzHUrnzpNWZHk8w7iuFO3P8AdyTx0rV0Ie0at0C2pd
13TbnWLDVLLTYfOmi16V5I1IGxWjwGOegz3rOnUUGpT/lAseIb+50+HxRLZ3Lwym4so/MibBx5YBwe3SlRpqbgpLTUOpDf3ErafqN55rfaZPDtrK8ob5i+/wC9n16c04QSaVvtMOpPrLTNP4hlvWke0k060aMs2QU3Lv2/ju/GppLSPLvdgW9YkZF1qR7e+bS3s2WN5bpPsZUqNnlKF+9nGAOc5qaS+HXW/bURXvEvLjR7vz/tdnGmmAefFKsthMoQYAVh8rHpxyDTjaM1bXXbqM83vLC509oVuY/LM0SzINwOUboeK9WNSMk+XuUdP4S1a4s9C1wIUJtIPtNsXGTFKTsLL6HBrjxVNSqQv1Ey9YLrk+jeHT4fkmEKO5vDC+Ns3mZJl9tvr2rOfs1Oaqr09PIXVkmr2B1/S7mLQolnji1uZ2WNgAisg+b2XOeaKc/ZSTqfygtDB8cHPjjUMHP7xOc/7C10YX+ArlLY6nUdSurnxf4lsJrmSSyTTJtluW+QERqQQOmck89a5I00qUJJa3J6GjpdrcpLawP9vurdtO8tZ/ORLR90Zwixj77duee9ZVJLVqyd9uv3gYVhcxjQLbxHO4F/o9rJYGN/vGXhYj+AZvyrolF+09ktpNP5dQZr6W7fZdAksItQls0tV894bpI7UPz5vnAgnOc5z+FYTteSla9+zv8AIDhNCijuvGlqtvMlsjXZaJyA4QAkrjPB7AfhXo1W1Q1XQrodT4jgun8GXxmttSVo72OX/iYXAlk28gvtH3Fyce9ceHa9tGzWq6ErcxvCUMeuWF74cuJRGryR3cLMcBSpAk/NCfyrfEt02qsfQpmtJqF9rmmavP4fMwvTfqClu22T7KqbYwvfGRk49axjCNOcVV2t+JPqWruSDbqy3ro8qWWnLqJBzmQS/PnHU4xms4qXu2Wl3b7gIdWXWV1HVptSuFXw688YVZm3RyRbxtEIB4O3uKun7Llior39fy6gXtekkjtvED3FvfmweBlie4ukNsckeWYVAznpgD3zWdJaws1f0f4geaX+n3WmyrDdx+XI8SyqNwOVYZB4r1oTjO7gUeloNcGq+Hp4pnXQ47CE3R8wCFV2fPvHrjGM+1eU/Zcs01719CTNtNOn1VPCN1p0e+ztJ5BK+4AQgT7gG9PlrSU1T9pGW7/yDYr6/Lez+FtSEcszwQ63OJVD5CocFQRnpuOfrVUVFVVf+UaG+EGuz4fuIoLa8mia8Us2mT+XcxsF4LA8Mn1PWqxSXtbtrbrsJ7mhqUGqfY9Tg0K6kudRGp7rt7XbHKy+WNuduOA2QccZBrCm4c0XVVlYDmfGjxt4jOWR5lt4Vu2Qg7pgvz9OM/1rtwifsttG9BrY7QDWD4hvJoJH/wCEdbT3FttceSV8r5Qo/vZznv1rhfs/ZpP476/eIqWP9qnUtAnsJWXw5HZxecQ4EKqFPmiQdN2c9faqlycs1L47v/gAT6S+7TNFfRoNSe3R3aT7HcpHGr7yT5wIyRtx17VE01KSqNfNfkL1G6ZJPOm2xt7sWh1KZ4ptIuB+5Jb/AJaqQFZe4J7VU1bWbV7Lfr6DPOtXQR6zfIJkn23DjzUUBX+Y8gDgfhXpUneCdrFFOtACgAoAKACgAoAKACgBdzFQuTtByBngUrIBSzFAhZto6LngfhRZXuA2nZAFFgDmgBQxGcEjIwcHqKVkADJOKegFu50u/s0le5tZYkil8iRmHCyYztPvjms41ISas/NBcqEk4yTxwOauyAUu5QIWbYDkLngfhRZXuAeY/lhN7bAchdxx+VFle4DaYAKNOoGlpmi6vqyS/wBm2VxOg4kMfC/QkkA/SsalSnB/vHqLYq3VrdafcSW1zFLBMvDxuCp/H2rRSjUXMtSivVbCFpWAkUTtEWUSmOI5JGcIT/LNJuKfmBHVAKHYKVDMFbqoPB+opWQDaYDmkdixZ2JbqSSc/WlZANpgOV2RtyMyt6qcGk0nuA2mApZioBJKr0GeBSslqBZ+x3rQTEwz+XbKGkDAgRBuAcHpmpU4XWu4DLq7mvJVkmIyqLGoVQoVVGAABThBRVkBDuYKVydp6jPBp2QAGYAgE4PUZ60WTAMnBGTg9eaLIBUkeMko7ISMEqxHH4UNJ7gWHs761IZoJ4i0ImBCkfuz0bj+E+tRzwlpfYCO5tLizZFuIWiZ0WRQw6q3Q/Q1UZKS91gRbmKhdx2jkDPAp8q3sAu9ghTc2wnJXPBP0ostwAO6hgrMA3DAHGfr60WTAFkdAwR2UMMHaxGfrRyp9AG0wCgAoAKACgAoAKACgDY8MQ2N1r9vZ6hGrwXQaAE5+R2GFYfQ4/OsMS5KnzReqB7HQ2Hhyxto7C11O133oiub+5XJVmjj+VI/YMQT61y1K85Nyg9NF95Nw0iy0vxDFp98+lW9oRqaWksUBby5kZC3IJ4Ix1FOrKpTcoqV9L6hsZ2kaXZ3OlX80turyRanbQIxzwjOQy/iK0q1Zqas+jKb1KnitrGPXLmysNPis4bSaSLKsS0mD1Yn6HHtWmFUuRSm73EjqNG0PTJl0/T7yx06J7m18xxLOzXjsVLB1C8IvAIB7da46taavJN6P5CZiW2lWckvg5Tbqft//Hxyf3v73HP4eldDqzSqu+3+Q76Fq5ttK0K2tpX0mK9a+vbhP3jsBFGkuwKmD97vk1mnUq3XNayX5CNfVNFttW1i7jl3K03iBIGdWP3PJ3EAdM8dcVjCrKEE1/L+oJlG90zRLi0ufLj0qKW2uIhEtjPJIzIZApWXI647+taRq1ItXbs+/wCgXYl/ZaPdXfiTTLbSILT+zo2khuEdi+4MAc5ONvPTtRGdSKhNybuBBqtvpVrqOoeHodD3m1hwl7GWMwkAUmR+cbOeeOlVTdRxVXm+QeZo3ug6HbzXukEaapgt2KSpO7XnmKudzLjG0+nYVnGtVaU03+gXPOVOQK9TRotHUeIZJYPDHhuGBmSxe1aRtpwrzbjuz6kVyUVGVWblvf8AAlF7TLee8K3HiS1W7gh0aSe1Vmw7IjDbuI57kAnsayqSUbqk7Ny1AsWdhpA0uw1Ke00ZG1KR3eK7nkQRxhtuyIDv3ye5qJTqObgm9P61ERw6PpunNqcn2fTpLaO9MMNzqkzBNgXJRUX5mfnriqlWnJJXd7dEFy1eW1lpVp4t062soPJElqEMjMceYRjv0UkkfrmoUpTdOo3rr+ADr3QdCt5r3SCNMQ28DbJlndrvzFUHcy4xtPp2FEa1VpT11fyA5bwxZWlzLf3V7D58VjZPc+RkgSMCAAcc455rsxE5RUVF2u7XGzWtYtK1G2l1iTQvIW0s5ZXgjLLb3LhwqlecgDPzfhWMpVIS9nz3u7X6oPIuaRpukay+lajNpcMCTPcxT20TMI5PLjLB1ycj069azqVKlNSgpX21+YnoR6VZaRrttpN5/ZEFru1UWkkUTsVkjMZYbsnr705zqU3KPM3oFyFLDS9dsbxLbTYdPe11CC3jljdmLJI5U78nk8ZquapSkryvdXDYt6no2iGHVbKJdMhks1P2d7eeSS43KwBEo
Ixz39DWUK1Vcs9de+wXZT1VdI0/UdR0WPw+JxYxbluELGUuoUlpOcbDnBx0HStYe0lGNTntd7f11DU0vEFvbalqWug28cUsVrZBZEZursgywzg4BwPYVjSlKEYtd2BSmstIuNW1fw/FpMUAsbeZorxXYzb41B3Pk4IPpjvWilUUY1XLdrQCxDY6HNrVpof9jQgXGnLNJc+Y/mLIYt4K84A4/HNJzq8jq82ztbyuGpDp2maVeaPZ29tY2VzeSWu+eGeV4bwyEE7os/KV6EDuKc6lSM3d2SfTb5gc/wCF7S21DVXsLqFZHuLeWOEnI2TbcqR75GPxrpxEpRgpp9hs6qXwvpVtb2t09sHTTbaT+1FJOHmESuoPPq+O3SuP6xUk3G/xPT0Fcdbi10211ALZQyb/AA3FO/mM53EnlevCnrx6cVMrykm39qwEkiabqOvaNo11pcMputMi33TOwkT92xXZg4GMfjmqXPGE5xk9GBxnhmGxuPEFvaahGHt7gmDcTjYzDCt+BxXbiHJU7x3WpTOisPDVjbR2FnqdrvvNtze3C7irNFECqx+wYgn1rlniJu8oOy0X37k3uM0q00vxBFp962k29oV1OK1ljgZvLmjdScEE9RjqOuadSVSi5RUr6fcFzOsNMtJdL1WaS3Vnh1K3gjJJ+VWkIZfxGK1qVJKUY36P8gbK/iw2MWuXNjp+nxWkNpM8e5WLNJz1OfTnHtV4ZS5FOUtxrYwTXQMKACgAoAKACgAoAt6cLY6hD9ruZLaANuaWOPey45GB9azq83K1DcDU1bxRd3fiyXW7OV4XDYgzglUAwAR0ORnI9zWdPDxjS9nL5hbQguvE2p3Utq/mRQC1k82FLaJY0V/72B1P1ojhoRurbhYlu/F2sXkPkySwJF5qzFIrdEBkU5DHA656+tEcJSTv8hWRkXdzLe3c11cMGmmcySNjGWJyeK2hBRjyrYZtW/jPWrWOBYpYA8ChFlNuhkKDohYjJX2rB4Sm29Nwshlp4v1iyhSKCaBRG7PETboTFuOSEJHyg+lEsJTk72FZDLXxTqtnHKkcsLh5WnHmwK/lyE5LJkfKfpTnhqUtbBZEM/iLVbguz3XzPdC8LKgU+aBtDAjpx26VSw9OLtbbQdkT3virVb+ERSSQRqZFlk8mBY/NcHIZ8D5uamGFpxd7BZFQ61ftcahOZh5moIyXJ2D5wTk/Tkdq09jCyjbRbBYtT+KtXubB7OWdCskYiklESiWRB0VnxkiojhaSlzWFZDpfFusS2T2zTx5ePyXnEKiZ0/ul8ZIpLC01LmsFkZl3f3F6lsk7KVtohDFhAuFHY46/U1tGCg3Zb6jsXtN8Salpdq1pC8MtsW3iG4hWVVb1APQ1lUw0Kj5mtQsMk8QapNeXV1LdF5rqA28pKjHln+EDGFHHamsPBJRtsFiTTfE2paXbLbwNA8SOZIhPAsnlN/eTPQ0VMPCpLme7Cw608U6raRTIJo5vNlM5a4hWUrIerqWHBpSw1OVrLYVkE3inVZ5LuSWWF2vIVgnzCv7wLnBPH3uetJYWmreQWQ6XxbrE1i9q88XzxeTJOIVEzx9NpfqRQsLTTv8AqFkZ2naldaVdi6s5dkoUqcqGDKeoIPBB9K1qU41FaYzQPivV/t8V2s8aGKNokiSFViCN95dmMYPesvqtPl5bCshJfFOqyXkFyJYojbxvHDHFCqxxqww2FxjnPXrQsNSUbNDsitY63qGmwQwWswSOG4F0gKA4kC7QefbtVzown8S12+QWIo9UvIra6t0l2x3UiySgKMllJIIPbknpVOlBtNrYLF+98VarqFnJbTSwgTACeSOFUkmA6b2AyayhhqcJc1gshtz4p1a7sHs5p4ysiCOWQRKJZEHRWfGSKI4anGXNYLIZdeJNTvIZIppkIlhSCQrEoZ1QgrkjnIwOaccNTi72CyJbvxXq95ZyW000X75BHNMsKrLKo7M4GSKUcLTg+a2wWKya9qKanHqKzKLqOIQq+wcIF2Yx06cVfsI8vJbfULFq38W6rbWUVtHJb5hj8mGdoFM0af3Vc8jrWbwtOUub5hYybW6msruG6t32TQuHRsZwR0rolGM001ox2LsviDU5odQhe5Jj1GQSXI2gb2H8vwrJUKas7fCKw+HxJqcNwJlmjZhaiz2vErKYh0UjGD9aHhqbVrdbhYYmv6kmpW2orOBdW0SwxP5a/KgBUDGMHgmn7Cm4uHRgVtPFs+oRfa7mS2h3bmljj3suORgfWnUvyPlVwNbWPFF1e+LJNbs5XhdG2wE4yqAYAI6c85HvWdPDxVL2cgS0K934m1O7e2bzYrdbaTzoktoViVZP72B1P1pww1ON+twsiS88WavfQGCWWBYjIsxSK3RAXU5DHA656+tKGFpwdwsjJu7qa+vJru4bdNM5kkYDGWPJ4FbRioxUVsBDVAFABQAUAFABQAUABOAT6DNAHRt4XC+I00n7WcNafafM8v8A6ZGTGM+2M1y/WH7NTt1t+Irk3/CKW0Wi215c6hLFLc232iN/sxa3HGQjSA8N+HepWKlzuKV7PvqFyWy8FfaIbOKe7nhv72ISwotozxICMqHkHQn9KmWMs3ZaLz/QLlePwtB/ZdhPc6l5N5fyvBDbmLIDrJsO5s8KPWreJlzNRV0tQuR+IPDtro0biO9uGnil8t4rm1MPmD+/GckMtOjiHUeq09fzBO5V0TSbXUUnkubqdPLKqsNrbmaWQnuF7AdzV1qsoNJLf5DZrr4J2anqUE91cNBZRRyn7PbF5pBIMj93njHOfSsfrnuxaWr7vQVzndUs4LC/eCC7FzCAGEgQqQD2Know7iuilNzhdr+vId9DYl8KCHU76E3hNnbWQvVuRH/rFYDYAM9STjr2rFYq8E0tW7WFcmPhG2Fy+lf2of7cSEym38j91uC7jHvz97Htil9alZT5fd9fxC5Vg8MifWdF0/7WQNStkn3+X/q9wJxjPPSqeJtCU7bOw7mZpOlzazq0GnW5USSsRubooAJJP0ANbVaqhDnYX0ubz+DopVt5bK8unha7jtZjcWbQspc4DqD95a5linqpLp0YuYZdeFLX7PfjTdUa8u7CZIpozBsU7n2Da2TnB4NOOKkmnOOjQXHTeFLBBqcEWtGW+02B5biH7MQpK9QrZ5weCaI4mbcbx0ewXFt/CFvd2Dtb388tylqblmW1JthgZKebn739aTxcovVaXtvqFxLDwnZXE2nWV3rBt9Rvo1ljhFvvVUYZAZsj5iOcUSxU/elGN4oLiaf4QjntLWa8vLiJrx2W3EFm0ygBtu6Qj7oJpVMXZvlW3mFzKttOntfFUGmzeWJ471YWLLvTO8DOO49u9dDmp0XNdh3Nm58O6egub/U9WNsrajNaiOC0zllbqBngd8dqwjXnpCEb6X3Fcw9U0afTdfm0jcJZklESkcbycbfpnIrohVUqXtB3Ni48LafEmpxRayZb7TIGlni+zEKxXGQjZ5wTg8VhDEzbi3H3W7CuTp4FZttmbqf+1Xg84RC0Ywg7d2wy9N2PwzxUPGWd7aX7hcis/Cdh
ONKhm1h4r3U4BJBCLbcFJzwzZ4HGKqWKneTjHReYXM6Tw+Y49FZrjnUpXjI2f6orIE9eeue1a+3vzWWyuO5sp4WadbXSjdRKjavPaeaLcb8omdxOeQcfd7etYfWGnKpb7KFcov4Xtrq1SXR9SN7ILxLORXgMQDv91lOTla0WJaf7yNtLhcfqXhKO1069uLW8uJpLDH2hZrRokYZwWjY/eAP+NKGK5pJNaPzuFzN0fR4b63vL29uza2NptEjrHvdmY4VVX14rarVcJRjFXbGzo7zTLaPT4xYzW80SaDJMZmthmUeb1xn5X5xnnGDXHGpLmfMnfm7kpmfq3hO30qyYyX8wu1hWUb7YiCbIB2xyZ5bn8a2p4pykly6f10Hcjv8Aw1p9gtzaS6wF1a2h814Gi2xE4B8tXzy2D6c044mcrS5fdegXGSeFwmv6jpf2skWdo9z5nl/f2oHxjPHXFNYlump262C5Nd+FLey0iO4uNQmjuJLUXKE2x+ztkZ8sSD+L8MZqFipSnZLr31+4Lhf+FLfT9KE0+oTJctai4UtbH7O+RnYsg/i/DrTjinKei0v31+4Lk1/oEb3k9zf3kdvZWtpbGR7e2AZmdflVUzyeDk5qIYhqKjFXbb6hcZF4QtppmlXVtumtYtex3TQHO1WCsrLngg+lU8U1o4+9ewXMzWdHt7CzsL6xvHurO8D7Gki8t1ZDhgRk1tSquTcJKzQIxq3KCgQUAFABQAUAFABQAEZBHrQB2SeLdLFyuoyaZdNqX2P7IzCdRGBs27gMZzj1964XhqluVNWvfzFZjNL8V6fplnEYrW+S5S38mS2jnAtZm2kb2U5OTnJA7054acna6tf5oGh9v4yt/s1m91HqLXdpAIRFDdlLebaMKXUcjtnHXFS8JJXSas/LULGRNrsc9no8EtmJRYSSPKshykweTeRjqPSt1RacrPcLF/VfEtncaFPpdkmouk8qyf6dMJFtwpztj7+2T2rOnh5KanKy9OoWINC8QW2n6PdabdJfIs0yzCWxmETtgY2MT/DVVqEpz51+INXL0/inSbvU3upLK/tmkgiQTW1wBLCyDGEY9VIxnPORWaw1SKtddd1owszF8SayuuaoLpIpERIUiBlYNI4UfecjqxrooUnSja9xrQ3tbv7jT/BOm6VcKiahKB5hVwzC3Ri0YbHu2ce1c1GnGdaUun6k2uyB/FenG+k1pNPuBrckJjJMq+QHKbTIBjOcdqpYapyqDa5b/Mdh2neLNLtZdKvbjTLmXUNOt1tkKTqsbKAQGIIzuwfpSnhalpQi9G7hYwNE1Z9F1q31KNA5iYkoTjcpBBGe3BPNdNWl7SnyMfQ3ZfFdnE9p9lj1OdY7uO5ka9u/MbCHOxOwHuea5lhpNO7Wz2RNijaeIvs8ustHEVk1GZJI2ZhiIiXzPm9fwrWeHbUU38K/QdjrL+G3sLbxFqU1h9nlvbV0Fx9tSWKZ3I4hUDcQTyc9MVxQlKUoQvs+35iM7/hONOe5W4ltNSYvbm3kt1ugIIlKbSY0x1+vvWzwdS3Ldd/NhY09FS3kutH1u7sgy21qoa+S8UQoqAgF0I3eYBxgcZrCo5LmpQeje3UDAsfFtqljawXqanmzZ/KFndeUkyFtwWQfpkdq6ZYV3bjbXvuh2MGLVNviKPVpIs7boXBjVvRs7QT+XNdLpv2fJfpYdi3q+vpqVn5C27xn+0JrzJYHiT+H6j1rOnQcJXv0sCRHq2s/2n4ok1eFPs5eaORFkOdpXaOSO3GaunS5aXs35glodnqMFvZWniPUZrD7NLfWzILj7YksUruQcQgDOD1JPTFefByk4Qvon/VyTIk8awTL9rmi1Fr/AMkRGJbsi1Zgu0OVHOe+Oma6Pqck+VWte/mOxlxeIkj1XQbw2zkaXBHEy7xmQqWOR6ferX2D5Jq/xBYtWniXSxbaf/aGnXM02nTyS2/lTBVYM+/D5HY+lZyw9S75Xo1qFiWHxnFFfW9x9ikIi1Oa/wAeYOQ6kbenUZ603hG01fpb7gsZmk+Im0mwlihhLTm9iu0cn5Rsz8pHvmrq0HOV79LBYvat4ntLywu4rWPUjLeHLi7uzJHAM5IjA656ZPQVnTw04yV7WXYLGfo2rWlrZX2najbyzWV3sZjA4WSN0OQwzwep4NbVqUpSU4OzXcbRfuPFNkYmgtNPmigGlvp6K8oYjL7t5OOfce9YrDTveT1vcVidvFlhDpl3FY219FJdQeSbVpw1rESOXReue4HY0fVJuS5mv1CxW1HxDpN/9rvjpUh1a7h8t2kkDQxtgAyIuM7uO/SqhQqRtDm0XbcLMtyeLdKee81BdLuhqN7ZtbSt56+WmUC7lGM84HWs1hqllG6snfzCzGWviuwstPdba2vo5pLYwNaCcG0LFdpfaec98etVLDTlLVq1736hYSHxVp9pps0drbX0cs1qbdrTzwbQMV2lwp5z3x60vqtRyu2t736hYjfxRY3rXNvf2VwbG4gt4z5UgEkckQwHGeDnng01hpK0oyV7vfzCw2XxTbiGe0trKSOy/s57C3VpAWXcwYuxxySR0FUsPK6k3re/3BYprrNlLpukWF7ZTSwWLTtII5Qhk38jB7YOPrVypT5pyi97AYZroKCgQUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAYoAKACgAoAKACgBMD0FAC0AGB6CgAoAKACgAoAMD0FABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUDOr8P+ANX16JbnC2lowysswOXH+yo5P14rjrY2nTdlqyXI6yP4Q2u395q9wW77YVA/XNcrzGd/hFzDv8AhUVj/wBBa7/79pR/aM+yDmD/AIVFY/8AQWu/+/aUf2jPsg5g/wCFRWP/AEFrv/v2lH9oz7IOYP8AhUVj/wBBa7/79pR/aM+yDmD/AIVFY/8AQWu/+/aUf2jPsg5g/wCFRWP/AEFrv/v2lH9oz7IOYRvhDZ4+XVroH3iU0f2jP+UOY5vXPhrq+lQvcWrpfwLy3lqVkUeu3v8Aga6KWOpzdpaMdzjDXcMSgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoGdj8PPDUevay892m6zswGZT0dz91T7cEmuLG13TjaO7Jbse3qoUADoK8UgXpQA0OrdGB+hoAdmi6AM0AIGBGQQRQAuaADNABQAhGaGB5H8TvDMVjPHrNogSO4fZOo6CTqGH1wc+/1r1cBXbXs5FJnndekUFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHsnwnRB4YuHGN7XbbvwVcV42YN
+1XoTLc72uEko61/yAtQ/69pP/QTV0/jj6geU6Ratb/8ACKXC6ZJYNPPGDqC3Bf7Rx90oDxu969Kbv7RXvbp2KZt6Z4z125vYbt7UyafPLKhiWAKI1XOCsm7LHjkYrCeHppON9dAsO03xLrlzNoctzeWUltq3mkwRxYaJVU/LnPPbmnKhTSlZaxtqKxRttf1ay8M6KunIkMDW0ssrW9uJmQhyBmMtkJ6mqdGDqSUtdvIaRa/t7UF1ttYW8inhXQ/tZhjRhG+DjAycj5uc4zjj3qVSg6fJaz5rXuFtBsfi7xHBpl7PcRhh9g+1QzPaiMI2RwBuO5SDwaboUnJKPezFY7rQv7RbTI5NTmhluJf3n7lNqqpAIX3x61xVOXmaiLqadQBy3xERH8D6hvx8oRlz67xiunB39tGw1ueD17xYUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAei/CrW47W+udJncKLnEkJJ43gYK/Uj+VebmFK6U10FJHrea8ogZNFHPC8Mqho5FKsp7g8EUXs7oCidC01rSztTaJ5NkyvbJz+6ZehHPaq9pJNtPcCCPwxo1vqLajBp8Md6xZhKFyVY9WA6A/hTdabjyt6AYOk+BHs9bgv7mayIt2dh9mtfKaYsCMvzgYB6KAK6KmKUouKT17sdzbm8IaDcW1vBJpsXl2ylYgCylVJyRkHOM9qwVeom2mIsHw9pJntpvsEO+2iMMR24CpgjbjoRyevrUqrNJq+4FeDwfoFtDcww6XAqXKbJRg/Muc7c54HsKp16krNvYDajjWKNUQYVQFUegFZgOzQB5v8VdcjjsIdGjcGaVhLMAfuoOgP1P8AKvQwFJuXP2KieTV65YUCCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAHRu0UiyIxV1IZWBwQR3FJq6swPTvDvxTVIUt9dicsowLqFc7v8AeX19x+VeXWy/W9LYlxOsj8feGJFDf2vCvs6sp/UVyPC1k/hFZj/+E68Mf9Bm2/X/AApfVqv8rFZh/wAJ14Y/6DNt+v8AhR9Wq/ysLMP+E68Mf9Bm2/X/AAo+rVf5WFmH/CdeGP8AoM236/4UfVqv8rCzD/hOvDH/AEGbb9f8KPq1X+VhZh/wnXhj/oM236/4UfVqv8rCzGt488MKpP8AbEB9lDE/yp/Vaz+yOzOc134qWcULRaNC88xyBNKpVF98dT+ldFLL5N3qaIaj3PK7u7nvruW6upWlnlbc7t1Jr1owUFyrYohqgCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAzQAZoAM0AGaADNABmgAzQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAHQJ4L1+SNZEscq4DKfNTkH8a5vrVMnniUdS0LU9IVWvrVokY4DZDDPpkd60hWpzdluNNPYza1GFABQAUAFABQAU7AFIYUCCgAoA3LXwjrd5axXMFlvhlUMjeaoyD9TXO8TTTsTzxRBqPhzVtKg868s2jizgsGDAfXB4qo4iEpcq3GpJ7GVWwwo7DCgQUDswoEFABQAUAaum+HNV1e3a4sbXzYlbYW3qOfxNYzrwg7SJcknZk9z4Q120t2mlsG2KMna6scfQHNT9ap7BzJmHXQUFABQAUPQAo3AKACjYAoGFG4goAKACgAoAKACgAoAKACgAoAKACgAPQ/SgD3nTb23g0qASQhiYUyzNgfdH5V4M0927HI3FXM/V7iyXSLpr2RRavHhtzfKxwcfU59K0jDnacdWEJPofPZlia8uxcXF4pWUhREWwBgegr7XkkqcPZxjqutik7yd2XZLp7ZESKIugjDeZNLtz7ZPU1x06Eaz55OzvayRrKbigGomXyVtofMklj83DPtCr05NL6koczqSsk7d7vyD2rlZJCHUmPlokH751LMkrhAozjqaawSs5t6X6a38/ITqvaxC1/JNc2jW8ZYssitEXwAwx1PtW31WNKM41HtbXfRk+0k2rE41IlNn2c/afN8ryt3fGc59MVi8Gk783upXv8A8Ar2r7aiHUjGsiywFbhGVRGrZ3FumD6UlglJpxknF3d9rWBVbXvuRXl7KLS6ikjME6Rh1KvkEbgMg1th8NB1ITi7xbttboKc5WaejLlvdi6kcxJ+4U7RLn7x74Hp71yV6HsopSfvPWxcJ87dtkWK5iwFNbge2eFLuG38M6f5kQc/Z15J4Arw60G5O7OaUlGTuTXVzafZppZ5FW1KkSEsNuz0PrSUOe3K7kRnfY+fNTt0XUIjBcXKxT3LDAlOAvJGPTtX1+DquVKXPFNxXY1lHVaiPfLYQ3CFJJDAygb33M+7nOcfX8qmOFeJlGa0Ur9NrD9pyXQtzfKyuFD7F8pi6Pg5Y8D8qKOEaacnrrv5BKpoyvNe3qxXpCgeXOFB3j5Rxx05/wDr1vTwuHlOmm91cl1JWbLUuoukkiJArGEAy5lAwcZwM9TXNTwSnFScrc22n9WNJVWna2w5dQaW5SKCAyK0ayFy2AFNS8HGMHObtZ2t5gqrcrJF2uK5qwoEeo/DaeOHRJmkj3/6Q2BnHYV5eLi3NpHPVaUtTqpbuOWcyQnyypz8j8qcVzKKkuVu5lzrdHh/i+e2bxqPsDqbZw5YRn5WYKM/rmvo8DS/2OfMtdPzN7vmjcw4NTklW3ke1KQzsEVt4Jyfb0rsqYGMXNKd3FXtYaqtpNofBqL3EnyW4aPeUyJAWBHcr2FRUwapw5nLWye2mvmCqtvYitb26Nq7vDvfzmRfnGAMnqccAetaVcLR9qoQlZWvtf8Aq4ozlYeuqDyZGaLMqSCIIjhgzHpg1m8D76V9Gr6q2w/a+6D6nJCLgTWpR4YxIQHyGBOODin9RhLlcJ3UnbYPatX5kPa8uAiE2gVmyfnlAVR2yfU+lSsLT5pWldLsrt/IfPKy0GDUy8Nu0UBd5nZAu8cEe/pVfUbTleWkddv61F7W6Wgz+1ZQju9oVSKTy5T5gODnHHr1FU8BTeinq1daB7VroaZrzTYKBBQAUAFABQAUAFABQAUAFABQAHoaAOnvfEyXiRxnzBFEiqqY4JAxk1
8xissxleVrpR9TzquFqVG9UUbTU7ZrkPqKyS26bvLgHKqxHDY6E16NHBVMNBUqVmnu76nTCk6SSh82cnbXS28lyxiuj5spcYgbjgCvqa1GVaEbSWiS3HGXI3dFW5cy3jTLBKwdAv721ZjHjutdNGMY0lBySs76Na+pMm3K9hsLy2wheKObzUj8pg1s+1lzkH61dRQqtqTVm7q0lp3+8mN0k0tQckvHN5U08oTY/wBotWIbnOR6Yz+VKCSTgmorpaQ33FDSRfZ3hSbzIg+4G0YK27HGB0FCUJc0ZtWdvtBdqzSF3MMTBLj7UJTKSbZtpyMbfXGKVo29m2uS1t1f1Hrut7iMzS+ZNIlwLkujoVtm2rt6D36mmvctCMly2a1avqJ6ttrUJWe6Sdp45xLJGI1CWz7VGc/WiEY0uWNNqyd3drVg25XbLliVW9lEMc0cEg3FHhKhWHoenPpXJi7umnUacl1v0/4BdPSVkaVeYbhRa4HSN4jV9MtLHMixwRBGAH3iO9fO4/L8XiJvla5ThrYerOWj0KdvqcD3kf24SPYo4Y2ynh8dzXThsBVwkFGlZye7/wAjSnQdJe7ucxqN1HPfJIkNyFiuGfAt25HPTFfVYWi4UpKTjeS7lzldryKszxTahFcmG72quGT7O3zHnH5ZNdFKM4UXTbjfvcUneXNYhjRY7BrfZdM7SK2427dFIwPyFayblWVRuOz0uTa0eUdM5kF4qx3AWdxImbZ8hhjg+3FKEVFwbafLdb9wet0NlJaaWRbZmabBYyWbNsbGCV/wNVFR5FGUvh2tJa+oO9723LdrKiXgYRXOGjSIZtyuCD1PYda5cRBzpWbW7e9zSLtI1K8robBQBvaZr/8AZ+jPYqXVpJS7Mo7YAx+lePmWFxNbSjpc5cRSnN+4VZNU3uUR5IoWGJCnDOPT6Vz4PK54WPtNJT/AijhXT9/qY/iC6s5tehnsraeO2ii2hFhLclQDyPcGvrMvhU+rSjUaTlbr2NdbpvcyFdVs7ODyrrMEisx+ztzjPT867nG9WdS695d0K/upW2I8s9zG8kMp8uTf5y2rCRhnoe1a2ioPlktVazat6hfVXQj7jHs8mV1WdpVR7Z8MD2b6U1yXu2tY20auvQl3sOVGEM8zbogJY5FP2dlCsOOn92pnNc0YrXRp63uv8xpOzBi939tkLiVWiWMNDGxUHdnAHU+/1oXLRVOO2rer6WDWV2SXcvnXMUyW0r7E27JrZio9x71FCEYRcXK13e6a+70HJ8z0GWxMJt90dwwhkdxi2YZDD9OtVXSqKVpK8klugjpuOkYPa3UXlXOZpvMB+ztwMg4/SpjHlqRnzL3Y23Q2/da7s2wdwDYIzzgjmvGludAVIBQAUAFABQAUAFABQAUAFABQAUAFABQAtHqMSjQLhRoFwo0C4UaBcKNAuFGgXCiyC4UAFAgoAKACgYUaBcKNAuFGgXCjQLhRoFwosguFAgoAKACgYUWQBRoFwo0C4UaBcWjYAouxCUWQ7hRoFxaLILiUCCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgDQsdHuL63e5Ettb2yOIzNcyiNS5Gdo9Tj8qznVjFpdQFvdD1DT4y9xbkBZXibad2GUAnOO2GGD0OaUa8JbMLle1sLm8uLeGKJt1xII4iw2qzE4HJ4q5TjG9+gC3GnXVtKkbxFnaJZgIxu+Q9CcdKmFWMldMLkHlSeX5nlv5f9/adv59Krmje1wFMEy7cwyDdjblD82emPWmpRfUCW0sbi81CKxiTFxK+xVk+Xn3z0qZVIxjzPYCF4ZY874pEwATuQjAPTrVc0ejC41wYzh1Kn0IwaYm0kIDn2+tAJphQO6DIzigXMgoHcCcUCbSEJAIHc0BdXsAIOfagFJO4tA7oMg96BXQUDujRsdFub62+0CW1t4DJ5SyXMwjDvjO1c9TyPYVlKtGLtq/QLla4sbq1nnhmgkV7dykvy5CH3I4q1OLSaYDk0+5ksZrwRkQRFAzNxncSBj15B6UnUipct9QITBMJDGYZfMAyU2Hdj6daq8e4Fw6Nei/urLy1M9tG0kihs8KATj1OCOKj2sOVT6MCkYZVbaYnDbtuCpBz6fX2q7ruFxh4ODwfemK6AHNAKSYUDuISBjPegUpKO4uRz7UDugoFdBQO6A8Y96BNpBnr7UBdBQO6CgAoAKACgAoAKACgAoAKACgAoAKANq0uLC70JNNvLt7N4Ll545RCZFcMoDKQOQRtGOxzWEozjU9pBXurC6mra+I7CxNlb2Ut3DZRXk0ksbEsXjaNVXdj72SG47ZrCVCcrtpXsgsXbXxHo9vZWcRupmELWcgVo5GZfKI3Dk7R3xtA46nNZyw9Vtu3f8Qsxth4o0yJVXzWgdRbMZjHJ8wjDAp8jAnk5GflPOaJYWpf7wsVk8U2rMsTGU2hspYja7cRmVpi4GM4AxjntVvDSSv1vv5WFY3L3UU0h1l1K7uJfOvrh4hMhzArRFVKgNkqCQMqQP7tYQhKpdRXRfPUNTm5ddsz4v0u/MheCzEaySpG2X25yQGJY4zgFjniuqNCaoyh1YzU07UrW/ePT7m7uNQso7aZ767kUqVXeJEHzHPBXH1cgVjUpyh7yVnpZfmHQ4nUr2TUdQuL2Y/vJ5TI3tk5x+A4/Cu+nBQioroTPZFU7SevY1ZDt0E446DpxQJWE47Y70Cdugoxnnpmgat1D/634UCuKxBOQeg4oKm03dDePXvQRYXjHXnigpWsJxzwD1oEWIEhZZjJKUZUzGAm7e2RwT24yc+1S79DSnY2befTr7RbWxvrySzezmkdWWAyCRHwSOOjAjjPHNYyjUjNzgr3RfU1bXXtKt4IvInuYLe3+0qbFlLfahICELMOM9Ac9McVjOjUbd0m3bXsFi5F4r0yGczyXVxPDJPbSpZmI7bURrggZODg8jHXHrWbw1R6Jd9e4rMhuPEVlLDJapqUkExtwi6hFFKSMSbymWYuQR3z146VUcPNatXV9h6lFNctD4u1TUBdTww3UMscVwsZLqzKAG2jnqDWroy9jGNrtdA6Gtb63BJb3d2zSXMOmwwPBdSDb5t2qlAcHnncDzziME1zuk00tr307IDz+Q5B3MSx5JPc16drEztYYSM/j1oM9NhPQH0xQF728h5KnHpQVJxdhv455oMxOMc4zxQNWtqBx+HNADiVO3npQW2nYTj14z0oIa3sxOMHnnigelixCsJt5meYrKpXy49mQ+Tzz2wPzpNyvpsaU/hGUywoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoslsMKBBRZdQCgYUAFABQAUAFABQAUAFABQAUAFAhaYCUgCgAoAKLIAoGFABQAUAFABQAUAFABQAUAFABQIKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKA
CgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD/2Q==", + }, + ], + tool_failed: false, }, { role: "assistant", @@ -221,7 +209,6 @@ export const CHAT_WITH_MULTI_MODAL: ChatThread = { title: "Index.html Screenshots", model: "gpt-4o", tool_use: "agent", - read: true, isTitleGenerated: true, createdAt: "2024-11-12T08:33:06.826Z", updatedAt: "2024-11-12T11:53:45.561Z", diff --git a/refact-agent/gui/src/__tests__/ChatCapsFetchError.test.tsx b/refact-agent/gui/src/__tests__/ChatCapsFetchError.test.tsx deleted file mode 100644 index 4ba4668b8..000000000 --- a/refact-agent/gui/src/__tests__/ChatCapsFetchError.test.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import { render, waitFor } from "../utils/test-utils"; -import { describe, expect, test } from "vitest"; -import { HttpResponse, http } from "msw"; -import { - server, - goodPrompts, - noTools, - goodUser, - goodPing, - chatLinks, - telemetryChat, - telemetryNetwork, -} from "../utils/mockServer"; -import { Chat } from "../features/Chat"; - -describe("chat caps error", () => { - test("error detail", async () => { - const errorMessage = - "500 Internal Server Error caps fetch failed: failed to open file 'hren'"; - server.use( - goodPing, - noTools, - goodPrompts, - goodUser, - chatLinks, - telemetryChat, - telemetryNetwork, - http.get("http://127.0.0.1:8001/v1/caps", () => { - return HttpResponse.json( - { - detail: errorMessage, - }, - { status: 500 }, - ); - }), - ); - - const app = render( - ({})} />, - ); - - const regex = new RegExp(errorMessage, "i"); - await waitFor(() => { - expect(app.queryByText(regex)).not.toBeNull(); - }); - }); -}); diff --git a/refact-agent/gui/src/__tests__/DeleteChat.test.tsx 
b/refact-agent/gui/src/__tests__/DeleteChat.test.tsx deleted file mode 100644 index 7887b6292..000000000 --- a/refact-agent/gui/src/__tests__/DeleteChat.test.tsx +++ /dev/null @@ -1,75 +0,0 @@ -import { render } from "../utils/test-utils"; -import { describe, expect, it } from "vitest"; -import { - server, - goodUser, - goodPing, - chatLinks, - telemetryChat, - telemetryNetwork, - goodCaps, -} from "../utils/mockServer"; -import { InnerApp } from "../features/App"; -import { HistoryState } from "../features/History/historySlice"; - -describe("Delete a Chat form history", () => { - server.use( - goodUser, - goodPing, - chatLinks, - telemetryChat, - telemetryNetwork, - goodCaps, - ); - it("can delete a chat", async () => { - const now = new Date().toISOString(); - const history: HistoryState = { - abc123: { - title: "Test title", - isTitleGenerated: false, - messages: [], - id: "abc123", - model: "foo", - tool_use: "quick", - new_chat_suggested: { - wasSuggested: false, - }, - createdAt: now, - updatedAt: now, - read: true, - }, - }; - const { user, store, ...app } = render(, { - preloadedState: { - history, - teams: { - group: { id: "123", name: "test" }, - }, - pages: [{ name: "history" }], - config: { - apiKey: "test", - lspPort: 8001, - themeProps: {}, - host: "vscode", - addressURL: "Refact", - }, - }, - }); - - const itemTitleToDelete = "Test title"; - - const restoreButtonText = await app.findByText(itemTitleToDelete); - - const deleteButton = - restoreButtonText.parentElement?.parentElement?.parentElement?.querySelector( - '[title="delete chat"]', - ); - - expect(deleteButton).not.toBeNull(); - - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - await user.click(deleteButton!); - - expect(store.getState().history).toEqual({}); - }); -}); diff --git a/refact-agent/gui/src/__tests__/RestoreChat.test.tsx b/refact-agent/gui/src/__tests__/RestoreChat.test.tsx deleted file mode 100644 index 144b2bf58..000000000 --- a/refact-agent/gui/src/__tests__/RestoreChat.test.tsx +++ /dev/null @@ -1,75 +0,0 @@ -import { render } from "../utils/test-utils"; -import { describe, expect, test } from "vitest"; -import { - server, - goodPrompts, - goodCaps, - noTools, - noCommandPreview, - noCompletions, - goodUser, - goodPing, - chatLinks, - telemetryChat, - telemetryNetwork, -} from "../utils/mockServer"; -import { InnerApp } from "../features/App"; - -describe("Restore Chat from history", () => { - test("Restore chat from history", async () => { - server.use( - goodPing, - goodCaps, - goodPrompts, - noTools, - noCommandPreview, - noCompletions, - goodUser, - chatLinks, - telemetryChat, - telemetryNetwork, - ); - - const { user, ...app } = render(, { - preloadedState: { - pages: [{ name: "login page" }, { name: "history" }], - teams: { - group: { id: "123", name: "test" }, - }, - history: { - id: { - title: "test title", - isTitleGenerated: true, - id: "id", - createdAt: "0", - updatedAt: "0", - model: "test", - tool_use: "explore", - messages: [ - { role: "user", content: "test user message", checkpoints: [] }, - { role: "assistant", content: "👋" }, - ], - new_chat_suggested: { - wasSuggested: false, - }, - read: true, - }, - }, - config: { - apiKey: "test", - lspPort: 8001, - themeProps: {}, - host: "vscode", - addressURL: "Refact", - }, - }, - }); - - const btn = app.getByText("test title"); - await user.click(btn); - - expect(app.queryByText("test user message")).not.toBeNull(); - - expect(app.queryByText("👋")).not.toBeNull(); - }); -}); diff --git 
a/refact-agent/gui/src/__tests__/StartNewChat.test.tsx b/refact-agent/gui/src/__tests__/StartNewChat.test.tsx deleted file mode 100644 index 62a464abe..000000000 --- a/refact-agent/gui/src/__tests__/StartNewChat.test.tsx +++ /dev/null @@ -1,113 +0,0 @@ -import { render } from "../utils/test-utils"; -import { describe, expect, test, beforeEach, afterEach } from "vitest"; -import { - server, - goodPrompts, - goodCaps, - noTools, - noCommandPreview, - noCompletions, - goodUser, - goodPing, - chatLinks, - telemetryChat, - telemetryNetwork, - goodCapsWithKnowledgeFeature, -} from "../utils/mockServer"; -import { InnerApp } from "../features/App"; -import { stubResizeObserver } from "../utils/test-utils"; - -describe("Start a new chat", () => { - // TODO: this shouldn't need to be called here. - - beforeEach(() => { - stubResizeObserver(); - - server.use( - goodPing, - goodCaps, - goodPrompts, - noTools, - noCommandPreview, - noCompletions, - goodUser, - chatLinks, - telemetryChat, - telemetryNetwork, - ); - }); - - afterEach(() => { - server.resetHandlers(); - }); - - // TODO: copy this for other tests done at a higher level - test("open chat with New Chat Button", async () => { - const { user, ...app } = render(, { - preloadedState: { - pages: [{ name: "history" }], - teams: { - group: { id: "123", name: "test" }, - }, - config: { - apiKey: "test", - lspPort: 8001, - themeProps: {}, - host: "vscode", - addressURL: "Refact", - }, - }, - }); - const btn = app.getByText("New chat"); - await user.click(btn); - - const textarea = app.container.querySelector("textarea"); - expect(textarea).not.toBeNull(); - }); - test("open chat with New Chat Button when knowledge feature is available", async () => { - server.use(goodCapsWithKnowledgeFeature); - - const { user, ...app } = render(, { - preloadedState: { - pages: [{ name: "history" }], - teams: { - group: { id: "123", name: "test" }, - }, - config: { - apiKey: "test", - lspPort: 8001, - themeProps: {}, - host: "vscode", - addressURL: "Refact", - }, - }, - }); - const btn = app.getByText("New chat"); - await user.click(btn); - - const textarea = app.container.querySelector("textarea"); - expect(textarea).not.toBeNull(); - }); - test("open chat with New Chat Button when knowledge feature is NOT available", async () => { - const { user, ...app } = render(, { - preloadedState: { - pages: [{ name: "history" }], - teams: { - group: null, - }, - config: { - apiKey: "test", - lspPort: 8001, - themeProps: {}, - host: "vscode", - addressURL: "Refact", - }, - }, - }); - const btn = app.getByText("New chat"); - await user.click(btn); - - const textarea = app.container.querySelector("textarea"); - expect(textarea).not.toBeNull(); - }); -}); diff --git a/refact-agent/gui/src/__tests__/chatCommands.test.ts b/refact-agent/gui/src/__tests__/chatCommands.test.ts new file mode 100644 index 000000000..051b8efb3 --- /dev/null +++ b/refact-agent/gui/src/__tests__/chatCommands.test.ts @@ -0,0 +1,363 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import { + sendChatCommand, + sendUserMessage, + updateChatParams, + abortGeneration, + respondToToolConfirmation, + respondToToolConfirmations, + updateMessage, + removeMessage, + type ChatCommand, +} from "../services/refact/chatCommands"; + +type MockRequestInit = { body?: string; headers?: Record }; +type MockCall = [string, MockRequestInit]; + +const mockFetch = + vi.fn<(url: string, init: MockRequestInit) => Promise>(); + +function getRequestBody(call: MockCall): Record { + return 
JSON.parse(call[1].body ?? "{}") as Record<string, unknown>; +} + +describe("chatCommands", () => { + beforeEach(() => { + global.fetch = mockFetch as unknown as typeof fetch; + mockFetch.mockReset(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe("sendChatCommand", () => { + it("should send POST request to correct URL", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + const chatId = "test-chat-123"; + const port = 8001; + const command = { type: "abort" as const }; + + await sendChatCommand(chatId, port, undefined, command); + + expect(mockFetch).toHaveBeenCalledWith( + `http://127.0.0.1:${port}/v1/chats/${chatId}/commands`, + expect.objectContaining({ + method: "POST", + headers: { "Content-Type": "application/json" }, + }), + ); + }); + + it("should include client_request_id in request body", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + const command = { type: "abort" as const }; + + await sendChatCommand("test", 8001, undefined, command); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody).toHaveProperty("client_request_id"); + expect(typeof calledBody.client_request_id).toBe("string"); + expect(calledBody.type).toBe("abort"); + }); + + it("should include authorization header when apiKey provided", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await sendChatCommand("test", 8001, "test-key", { + type: "abort" as const, + }); + + const call = mockFetch.mock.calls[0] as MockCall; + expect(call[1].headers).toHaveProperty( + "Authorization", + "Bearer test-key", + ); + }); + + it("should throw on HTTP error", async () => { + mockFetch.mockResolvedValueOnce({ + ok: false, + status: 500, + statusText: "Internal Server Error", + text: () => Promise.resolve("Error details"), + } as Response); + + await expect( + sendChatCommand("test", 8001, undefined, { type: "abort" as const }), + ).rejects.toThrow("Failed to send command"); + }); + }); + + describe("sendUserMessage", () => { + it("should send user_message command with string content", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await sendUserMessage("test-chat", "Hello world", 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("user_message"); + expect(calledBody.content).toBe("Hello world"); + }); + + it("should send user_message command with multi-modal content", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + const content = [ + { type: "text" as const, text: "What is this?" }, + { + type: "image_url" as const, + image_url: { url: "data:image/png;base64,..." 
}, + }, + ]; + + await sendUserMessage("test-chat", content, 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("user_message"); + expect(Array.isArray(calledBody.content)).toBe(true); + expect(calledBody.content).toEqual(content); + }); + }); + + describe("updateChatParams", () => { + it("should send set_params command", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await updateChatParams( + "test-chat", + { model: "gpt-4", mode: "AGENT" }, + 8001, + ); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("set_params"); + expect(calledBody.patch).toEqual({ model: "gpt-4", mode: "AGENT" }); + }); + + it("should send partial params update", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await updateChatParams("test-chat", { boost_reasoning: true }, 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("set_params"); + expect(calledBody.patch).toEqual({ boost_reasoning: true }); + }); + }); + + describe("abortGeneration", () => { + it("should send abort command", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await abortGeneration("test-chat", 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("abort"); + }); + }); + + describe("respondToToolConfirmation", () => { + it("should send tool_decision command with accepted=true", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await respondToToolConfirmation("test-chat", "call_123", true, 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("tool_decision"); + expect(calledBody.tool_call_id).toBe("call_123"); + expect(calledBody.accepted).toBe(true); + }); + + it("should send tool_decision command with accepted=false", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await respondToToolConfirmation("test-chat", "call_456", false, 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("tool_decision"); + expect(calledBody.tool_call_id).toBe("call_456"); + expect(calledBody.accepted).toBe(false); + }); + }); + + describe("respondToToolConfirmations", () => { + it("should send tool_decisions command with object array", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + const decisions = [ + { tool_call_id: "call_1", accepted: true }, + { tool_call_id: "call_2", accepted: false }, + { tool_call_id: "call_3", accepted: true }, + ]; + + await respondToToolConfirmations("test-chat", decisions, 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("tool_decisions"); + expect(calledBody.decisions).toEqual(decisions); + }); + }); + + describe("updateMessage", () => { + it("should send update_message command", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await updateMessage("test-chat", "msg_5", "Updated text", 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("update_message"); + expect(calledBody.message_id).toBe("msg_5"); + expect(calledBody.content).toBe("Updated text"); + }); + + it("should send update_message with regenerate flag", async () => { + 
mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await updateMessage( + "test-chat", + "msg_5", + "Updated text", + 8001, + undefined, + true, + ); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("update_message"); + expect(calledBody.regenerate).toBe(true); + }); + }); + + describe("removeMessage", () => { + it("should send remove_message command", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await removeMessage("test-chat", "msg_5", 8001); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("remove_message"); + expect(calledBody.message_id).toBe("msg_5"); + }); + + it("should send remove_message with regenerate flag", async () => { + mockFetch.mockResolvedValueOnce({ ok: true } as Response); + + await removeMessage("test-chat", "msg_5", 8001, undefined, true); + + const calledBody = getRequestBody(mockFetch.mock.calls[0] as MockCall); + expect(calledBody.type).toBe("remove_message"); + expect(calledBody.regenerate).toBe(true); + }); + }); +}); + +describe("Command Types", () => { + it("should correctly type user_message command with string", () => { + const command: ChatCommand = { + type: "user_message", + content: "Hello", + attachments: [], + client_request_id: "test-id", + }; + + expect(command.type).toBe("user_message"); + }); + + it("should correctly type user_message command with multimodal array", () => { + const command: ChatCommand = { + type: "user_message", + content: [ + { type: "text", text: "Hello" }, + { type: "image_url", image_url: { url: "data:..." } }, + ], + attachments: [], + client_request_id: "test-id", + }; + + expect(command.type).toBe("user_message"); + }); + + it("should correctly type set_params command", () => { + const command: ChatCommand = { + type: "set_params", + patch: { + model: "gpt-4", + mode: "AGENT", + boost_reasoning: true, + }, + client_request_id: "test-id", + }; + + expect(command.type).toBe("set_params"); + }); + + it("should correctly type abort command", () => { + const command: ChatCommand = { + type: "abort", + client_request_id: "test-id", + }; + expect(command.type).toBe("abort"); + }); + + it("should correctly type tool_decision command", () => { + const command: ChatCommand = { + type: "tool_decision", + tool_call_id: "call_123", + accepted: true, + client_request_id: "test-id", + }; + + expect(command.type).toBe("tool_decision"); + }); + + it("should correctly type ide_tool_result command", () => { + const command: ChatCommand = { + type: "ide_tool_result", + tool_call_id: "call_123", + content: "result", + tool_failed: false, + client_request_id: "test-id", + }; + + expect(command.type).toBe("ide_tool_result"); + }); + + it("should correctly type tool_decisions command", () => { + const command: ChatCommand = { + type: "tool_decisions", + decisions: [ + { tool_call_id: "call_1", accepted: true }, + { tool_call_id: "call_2", accepted: false }, + ], + client_request_id: "test-id", + }; + + expect(command.type).toBe("tool_decisions"); + }); + + it("should correctly type update_message command", () => { + const command: ChatCommand = { + type: "update_message", + message_id: "msg_5", + content: "Updated", + regenerate: true, + client_request_id: "test-id", + }; + + expect(command.type).toBe("update_message"); + }); + + it("should correctly type remove_message command", () => { + const command: ChatCommand = { + type: "remove_message", + message_id: "msg_5", + regenerate: false, 
+ client_request_id: "test-id", + }; + + expect(command.type).toBe("remove_message"); + }); +}); diff --git a/refact-agent/gui/src/__tests__/chatReducer.test.ts b/refact-agent/gui/src/__tests__/chatReducer.test.ts new file mode 100644 index 000000000..8290bd90f --- /dev/null +++ b/refact-agent/gui/src/__tests__/chatReducer.test.ts @@ -0,0 +1,317 @@ +import { expect, test, describe, beforeEach } from "vitest"; +import { chatReducer } from "../features/Chat/Thread/reducer"; +import type { Chat } from "../features/Chat/Thread/types"; +import { + newChatAction, + createChatWithId, + closeThread, + switchToThread, + addThreadImage, + removeThreadImageByIndex, + applyChatEvent, +} from "../features/Chat/Thread/actions"; +import type { ChatEventEnvelope } from "../services/refact/chatSubscription"; + +describe("Chat Thread Reducer - Core Functionality", () => { + let initialState: Chat; + let chatId: string; + + beforeEach(() => { + const emptyState = chatReducer(undefined, { type: "@@INIT" }); + initialState = chatReducer(emptyState, newChatAction(undefined)); + chatId = initialState.current_thread_id; + }); + + describe("Chat Thread Creation", () => { + test("should_create_new_chat_with_initial_state", () => { + expect(initialState.open_thread_ids).toHaveLength(1); + expect(initialState.current_thread_id).toBe( + initialState.open_thread_ids[0], + ); + expect(initialState.threads[chatId]?.thread.messages).toHaveLength(0); + }); + + test("should_preserve_last_used_parameters", () => { + const customTitle = "Test Chat Title"; + const state = chatReducer( + initialState, + newChatAction({ title: customTitle }), + ); + const newChatId = state.current_thread_id; + + expect(state.threads[newChatId]?.thread.title).toBe(customTitle); + expect(state.open_thread_ids).toHaveLength(2); + }); + }); + + describe("Task Chat Handling", () => { + test("should_not_add_task_chat_to_open_tabs", () => { + const taskChatId = "task-chat-123"; + const state = chatReducer( + initialState, + createChatWithId({ + id: taskChatId, + isTaskChat: true, + title: "Task Chat", + }), + ); + + expect(state.open_thread_ids).not.toContain(taskChatId); + expect(state.threads[taskChatId]).toBeDefined(); + expect(state.threads[taskChatId]?.thread.is_task_chat).toBe(true); + }); + + test("should_preserve_is_task_chat_flag_on_snapshot", () => { + const taskChatId = "task-chat-456"; + const state = chatReducer( + initialState, + createChatWithId({ + id: taskChatId, + isTaskChat: true, + title: "Task Chat", + }), + ); + + expect(state.threads[taskChatId]?.thread.is_task_chat).toBe(true); + expect(state.open_thread_ids).not.toContain(taskChatId); + }); + }); + + describe("Thread Lifecycle", () => { + test("should_switch_threads_and_reset_snapshot_received", () => { + const state1 = chatReducer(initialState, newChatAction(undefined)); + const chat1Id = initialState.current_thread_id; + const chat2Id = state1.current_thread_id; + + const snapshotEvent: ChatEventEnvelope = { + chat_id: chat2Id, + seq: "1", + type: "snapshot", + thread: { + id: chat2Id, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [], + }; + + let state = chatReducer(state1, applyChatEvent(snapshotEvent)); + expect(state.threads[chat2Id]?.snapshot_received).toBe(true); + 
+ state = chatReducer(state, switchToThread({ id: chat1Id })); + + expect(state.current_thread_id).toBe(chat1Id); + expect(state.threads[chat1Id]?.snapshot_received).toBe(false); + }); + + test("should_close_thread_when_not_streaming", () => { + const state1 = chatReducer(initialState, newChatAction(undefined)); + const chat1Id = initialState.current_thread_id; + const chat2Id = state1.current_thread_id; + + const state = chatReducer(state1, closeThread({ id: chat2Id })); + + expect(state.open_thread_ids).not.toContain(chat2Id); + expect(state.threads[chat2Id]).toBeUndefined(); + expect(state.current_thread_id).toBe(chat1Id); + }); + + test("should_keep_thread_in_memory_when_streaming", () => { + const state1 = chatReducer(initialState, newChatAction(undefined)); + const chat2Id = state1.current_thread_id; + + const snapshotEvent: ChatEventEnvelope = { + chat_id: chat2Id, + seq: "1", + type: "snapshot", + thread: { + id: chat2Id, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [], + }; + + let state = chatReducer(state1, applyChatEvent(snapshotEvent)); + expect(state.threads[chat2Id]?.streaming).toBe(true); + + state = chatReducer(state, closeThread({ id: chat2Id })); + + expect(state.open_thread_ids).not.toContain(chat2Id); + expect(state.threads[chat2Id]).toBeDefined(); + expect(state.threads[chat2Id]?.streaming).toBe(true); + }); + }); + + describe("Image Attachment", () => { + test("should_add_image_up_to_limit", () => { + let state = initialState; + + for (let i = 0; i < 5; i++) { + state = chatReducer( + state, + addThreadImage({ + id: chatId, + image: { + name: `image${i}.png`, + content: `data:image/png;base64,${i}`, + type: "image/png", + }, + }), + ); + } + + expect(state.threads[chatId]?.attached_images).toHaveLength(5); + + state = chatReducer( + state, + addThreadImage({ + id: chatId, + image: { + name: "image5.png", + content: "data:image/png;base64,5", + type: "image/png", + }, + }), + ); + + expect(state.threads[chatId]?.attached_images).toHaveLength(5); + }); + + test("should_remove_image_by_index", () => { + let state = initialState; + + state = chatReducer( + state, + addThreadImage({ + id: chatId, + image: { + name: "image1.png", + content: "data:image/png;base64,1", + type: "image/png", + }, + }), + ); + + state = chatReducer( + state, + addThreadImage({ + id: chatId, + image: { + name: "image2.png", + content: "data:image/png;base64,2", + type: "image/png", + }, + }), + ); + + expect(state.threads[chatId]?.attached_images).toHaveLength(2); + + state = chatReducer( + state, + removeThreadImageByIndex({ + id: chatId, + index: 0, + }), + ); + + expect(state.threads[chatId]?.attached_images).toHaveLength(1); + expect(state.threads[chatId]?.attached_images[0]?.name).toBe( + "image2.png", + ); + }); + + test("should_handle_image_removal_edge_cases", () => { + let state = initialState; + + state = chatReducer( + state, + removeThreadImageByIndex({ + id: chatId, + index: 0, + }), + ); + + expect(state.threads[chatId]?.attached_images).toHaveLength(0); + + state = chatReducer( + state, + addThreadImage({ + id: chatId, + image: { + name: "image1.png", + content: "data:image/png;base64,1", + type: "image/png", + }, + }), + ); + + state = chatReducer( + state, + 
removeThreadImageByIndex({ + id: chatId, + index: 999, + }), + ); + + expect(state.threads[chatId]?.attached_images).toHaveLength(1); + }); + }); + + describe("Edge Cases", () => { + test("should_handle_operations_on_nonexistent_thread_gracefully", () => { + const state = chatReducer( + initialState, + closeThread({ id: "nonexistent-id" }), + ); + + expect(state.threads["nonexistent-id"]).toBeUndefined(); + expect(state.current_thread_id).toBe(chatId); + }); + + test("should_maintain_state_consistency_with_concurrent_operations", () => { + const state1 = chatReducer(initialState, newChatAction(undefined)); + const chat1Id = initialState.current_thread_id; + const chat2Id = state1.current_thread_id; + + let state = state1; + state = chatReducer(state, switchToThread({ id: chat1Id })); + expect(state.current_thread_id).toBe(chat1Id); + + state = chatReducer(state, closeThread({ id: chat2Id })); + expect(state.current_thread_id).toBe(chat1Id); + expect(state.open_thread_ids).toContain(chat1Id); + expect(state.open_thread_ids).not.toContain(chat2Id); + }); + }); +}); diff --git a/refact-agent/gui/src/__tests__/chatSSEProtocol.test.ts b/refact-agent/gui/src/__tests__/chatSSEProtocol.test.ts new file mode 100644 index 000000000..06910246c --- /dev/null +++ b/refact-agent/gui/src/__tests__/chatSSEProtocol.test.ts @@ -0,0 +1,1336 @@ +/** + * SSE Protocol Completeness & Correctness Tests + * + * Tests all ChatEvent types from backend (engine/src/chat/types.rs) + * Validates event structure, sequence numbers, and state transitions + * + * Run with: npm run test:no-watch -- chatSSEProtocol + */ + +/* eslint-disable @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/require-await, @typescript-eslint/ban-ts-comment */ +// @ts-nocheck - Testing runtime behavior with discriminated unions +import { describe, it, expect, vi, beforeEach } from "vitest"; +import { + subscribeToChatEvents, + applyDeltaOps, + type EventEnvelope, + type DeltaOp, +} from "../services/refact/chatSubscription"; +import type { ChatMessage } from "../services/refact/types"; + +const createMockReader = (chunks: string[]) => { + let index = 0; + return { + read: vi.fn(async () => { + if (index >= chunks.length) { + return { done: true, value: undefined }; + } + const encoder = new TextEncoder(); + return { done: false, value: encoder.encode(chunks[index++]) }; + }), + }; +}; + +const createMockFetch = (chunks: string[]) => { + return vi.fn().mockResolvedValue({ + ok: true, + body: { + getReader: () => createMockReader(chunks), + }, + }); +}; + +describe("SSE Protocol - Event Types", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + describe("Snapshot Event", () => { + it("should parse snapshot with all fields", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test Chat", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: 
vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(1); + expect(events[0].type).toBe("snapshot"); + expect(events[0].seq).toBe("0"); + expect((events[0] as any).thread.id).toBe("test-123"); + expect((events[0] as any).runtime.state).toBe("idle"); + }); + + it("should handle snapshot with messages", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [ + { role: "user", content: "Hello", message_id: "msg-1" }, + { role: "assistant", content: "Hi there", message_id: "msg-2" }, + ], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].messages).toHaveLength(2); + expect(events[0].messages[0].role).toBe("user"); + expect(events[0].messages[1].role).toBe("assistant"); + }); + }); + + describe("Stream Events", () => { + it("should parse stream_started event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "1", + type: "stream_started", + message_id: "msg-new", + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("stream_started"); + expect(events[0].message_id).toBe("msg-new"); + }); + + it("should parse stream_delta with all op types", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "2", + type: "stream_delta", + message_id: "msg-new", + ops: [ + { op: "append_content", text: "Hello" }, + { op: "append_reasoning", text: "thinking..." 
}, + { + op: "set_tool_calls", + tool_calls: [ + { id: "call_1", function: { name: "test", arguments: "{}" } }, + ], + }, + { op: "set_thinking_blocks", blocks: [{ thinking: "step 1" }] }, + { op: "add_citation", citation: { url: "http://example.com" } }, + { + op: "set_usage", + usage: { prompt_tokens: 100, completion_tokens: 50 }, + }, + { op: "merge_extra", extra: { custom_field: "value" } }, + ], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("stream_delta"); + expect(events[0].ops).toHaveLength(7); + expect(events[0].ops[0].op).toBe("append_content"); + expect(events[0].ops[6].op).toBe("merge_extra"); + }); + + it("should parse stream_finished with all finish_reason values", async () => { + const reasons = ["stop", "length", "abort", "error", "tool_calls", null]; + + for (const reason of reasons) { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "3", + type: "stream_finished", + message_id: "msg-new", + finish_reason: reason, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(event)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("stream_finished"); + expect(events[0].finish_reason).toBe(reason); + } + }); + }); + + describe("Message Events", () => { + it("should parse message_added event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "4", + type: "message_added", + message: { role: "user", content: "New message", message_id: "msg-5" }, + index: 2, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("message_added"); + expect(events[0].message.role).toBe("user"); + expect(events[0].index).toBe(2); + }); + + it("should parse message_updated event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "5", + type: "message_updated", + message_id: "msg-3", + message: { + role: "user", + content: "Updated content", + message_id: "msg-3", + }, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("message_updated"); + expect(events[0].message_id).toBe("msg-3"); + }); + + it("should parse message_removed event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "6", + type: "message_removed", + message_id: "msg-4", + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + 
await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("message_removed"); + expect(events[0].message_id).toBe("msg-4"); + }); + + it("should parse messages_truncated event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "7", + type: "messages_truncated", + from_index: 5, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("messages_truncated"); + expect(events[0].from_index).toBe(5); + }); + }); + + describe("State Events", () => { + it("should parse thread_updated event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "8", + type: "thread_updated", + title: "New Title", + model: "gpt-4o", + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("thread_updated"); + expect(events[0].title).toBe("New Title"); + }); + }); + + describe("Pause Events", () => { + it("should parse pause_required event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "11", + type: "pause_required", + reasons: [ + { + type: "confirmation", + command: "patch", + rule: "always", + tool_call_id: "call_1", + integr_config_path: null, + }, + ], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("pause_required"); + expect(events[0].reasons).toHaveLength(1); + expect(events[0].reasons[0].type).toBe("confirmation"); + }); + + it("should parse pause_cleared event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "12", + type: "pause_cleared", + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("pause_cleared"); + }); + }); + + describe("IDE Tool Events", () => { + it("should parse ide_tool_required event", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "13", + type: "ide_tool_required", + tool_call_id: "call_ide_1", + tool_name: "goto", + args: { file: "test.ts", line: 42 }, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("ide_tool_required"); + expect(events[0].tool_call_id).toBe("call_ide_1"); + expect(events[0].tool_name).toBe("goto"); + expect(events[0].args).toEqual({ file: 
"test.ts", line: 42 }); + }); + }); + + describe("Ack Events", () => { + it("should parse ack event with success", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "14", + type: "ack", + client_request_id: "req-123", + accepted: true, + result: null, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("ack"); + expect(events[0].client_request_id).toBe("req-123"); + expect(events[0].accepted).toBe(true); + }); + + it("should parse ack event with error", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "15", + type: "ack", + client_request_id: "req-456", + accepted: false, + result: { error: "Invalid command" }, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("ack"); + expect(events[0].accepted).toBe(false); + expect(events[0].result).toEqual({ error: "Invalid command" }); + }); + }); +}); + +describe("SSE Protocol - Sequence Numbers", () => { + it("should accept string sequence numbers", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "42", + type: "pause_cleared", + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].seq).toBe("42"); + }); + + it("should accept numeric sequence numbers", async () => { + const event = { + chat_id: "test-123", + seq: 42, + type: "pause_cleared", + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].seq).toBe("42"); + }); + + it("should handle monotonically increasing sequences", async () => { + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + })}\n\n`, + `data: ${JSON.stringify({ + chat_id: "test", + seq: "2", + type: "pause_cleared", + })}\n\n`, + `data: ${JSON.stringify({ + chat_id: "test", + seq: "3", + type: "pause_cleared", + })}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 20)); + + expect(events).toHaveLength(3); + expect(events[0].seq).toBe("1"); + expect(events[1].seq).toBe("2"); + expect(events[2].seq).toBe("3"); + }); +}); + +describe("SSE Protocol - Field Variations", () => { + describe("RuntimeState variations", () => { + it("should handle runtime with pause_reasons in snapshot", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: 
"0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "paused", + paused: true, + error: null, + queue_size: 1, + pause_reasons: [ + { + type: "confirmation", + command: "patch", + rule: "always", + tool_call_id: "call_1", + integr_config_path: null, + }, + ], + }, + messages: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].runtime.pause_reasons).toHaveLength(1); + expect(events[0].runtime.pause_reasons[0].type).toBe("confirmation"); + }); + + it("should handle runtime with error state", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "error", + paused: false, + error: "Connection timeout", + queue_size: 0, + pause_reasons: [], + }, + messages: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].runtime.state).toBe("error"); + expect(events[0].runtime.error).toBe("Connection timeout"); + }); + + it("should handle runtime with queue_size > 0", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 5, + pause_reasons: [], + }, + messages: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].runtime.queue_size).toBe(5); + }); + }); + + describe("ThreadParams variations", () => { + it("should handle thread with context_tokens_cap set", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: true, + context_tokens_cap: 8000, + include_project_info: false, + checkpoints_enabled: false, + is_title_generated: true, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = 
createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].thread.context_tokens_cap).toBe(8000); + expect(events[0].thread.boost_reasoning).toBe(true); + expect(events[0].thread.include_project_info).toBe(false); + expect(events[0].thread.checkpoints_enabled).toBe(false); + expect(events[0].thread.is_title_generated).toBe(true); + }); + + it("should handle thread with different modes", async () => { + const modes = ["AGENT", "EXPLORE", "QUICK"]; + + for (const mode of modes) { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test", + model: "gpt-4", + mode, + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].thread.mode).toBe(mode); + } + }); + }); + + describe("PauseReason variations", () => { + it("should handle pause_reason with integr_config_path", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "1", + type: "pause_required", + reasons: [ + { + type: "integration", + command: "docker_exec", + rule: "ask", + tool_call_id: "call_1", + integr_config_path: "/path/to/config.yaml", + }, + ], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].reasons[0].integr_config_path).toBe( + "/path/to/config.yaml", + ); + }); + + it("should handle multiple pause_reasons", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "1", + type: "pause_required", + reasons: [ + { + type: "confirmation", + command: "patch", + rule: "always", + tool_call_id: "call_1", + integr_config_path: null, + }, + { + type: "confirmation", + command: "shell", + rule: "ask", + tool_call_id: "call_2", + integr_config_path: null, + }, + ], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].reasons).toHaveLength(2); + expect(events[0].reasons[0].tool_call_id).toBe("call_1"); + expect(events[0].reasons[1].tool_call_id).toBe("call_2"); + }); + }); +}); + +describe("SSE Protocol - Edge Cases", () => { + it("should handle empty messages array in snapshot", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Empty", + model: "gpt-4", + mode: "AGENT", + 
tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].messages).toEqual([]); + }); + + it("should handle null finish_reason", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "1", + type: "stream_finished", + message_id: "msg-1", + finish_reason: null, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].finish_reason).toBeNull(); + }); + + it("should handle empty pause_reasons array", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "1", + type: "pause_required", + reasons: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].reasons).toEqual([]); + }); + + it("should skip [DONE] marker", async () => { + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + })}\n\n`, + `data: [DONE]\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(1); + }); + + it("should handle malformed JSON gracefully", async () => { + const events: EventEnvelope[] = []; + const errors: Error[] = []; + const mockFetch = createMockFetch([ + `data: {invalid json}\n\n`, + `data: ${JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + })}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: (e) => errors.push(e), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(1); + expect(events[0].type).toBe("pause_cleared"); + }); + + it("should handle messages with all ChatMessage fields", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [ + { + role: "user", + content: "Hello", + message_id: "msg-1", + }, + { + role: "assistant", + content: "Hi", + message_id: "msg-2", + tool_calls: [ + { + id: 
"call_1", + type: "function", + function: { name: "test", arguments: "{}" }, + }, + ], + finish_reason: "tool_calls", + usage: { prompt_tokens: 10, completion_tokens: 5 }, + }, + { + role: "tool", + content: "Result", + message_id: "msg-3", + tool_call_id: "call_1", + tool_failed: false, + }, + ], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].messages).toHaveLength(3); + expect(events[0].messages[1].tool_calls).toHaveLength(1); + expect(events[0].messages[1].finish_reason).toBe("tool_calls"); + expect(events[0].messages[2].tool_call_id).toBe("call_1"); + }); + + it("should handle multimodal message content", async () => { + const snapshot: EventEnvelope = { + chat_id: "test-123", + seq: "0", + type: "snapshot", + thread: { + id: "test-123", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [ + { + role: "user", + content: [ + { type: "text", text: "What's in this image?" }, + { + type: "image_url", + image_url: { url: "data:image/png;base64,..." }, + }, + ], + message_id: "msg-1", + }, + ], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify(snapshot)}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(Array.isArray(events[0].messages[0].content)).toBe(true); + expect((events[0].messages[0].content as any)[0].type).toBe("text"); + expect((events[0].messages[0].content as any)[1].type).toBe("image_url"); + }); + + it("should handle stream_delta with empty ops array", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "1", + type: "stream_delta", + message_id: "msg-1", + ops: [], + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].ops).toEqual([]); + }); + + it("should handle very long sequence numbers", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "999999999999", + type: "pause_cleared", + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].seq).toBe("999999999999"); + }); + + it("should handle thread_updated with flattened params", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "1", + type: "thread_updated", + title: "New Title", + model: "gpt-4o", + boost_reasoning: true, + custom_field: "custom_value", + }; + + const events: EventEnvelope[] = 
[]; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].type).toBe("thread_updated"); + expect((events[0] as any).title).toBe("New Title"); + expect((events[0] as any).custom_field).toBe("custom_value"); + }); + + it("should handle ack with null result", async () => { + const event: EventEnvelope = { + chat_id: "test-123", + seq: "1", + type: "ack", + client_request_id: "req-123", + accepted: true, + result: null, + }; + + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([`data: ${JSON.stringify(event)}\n\n`]); + global.fetch = mockFetch; + + subscribeToChatEvents("test-123", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].result).toBeNull(); + }); + + it("should handle rapid event sequence", async () => { + const events: EventEnvelope[] = []; + const mockFetch = createMockFetch([ + `data: ${JSON.stringify({ + chat_id: "test", + seq: "1", + type: "stream_started", + message_id: "msg-1", + })}\n\n`, + `data: ${JSON.stringify({ + chat_id: "test", + seq: "2", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "append_content", text: "H" }], + })}\n\n`, + `data: ${JSON.stringify({ + chat_id: "test", + seq: "3", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "append_content", text: "i" }], + })}\n\n`, + `data: ${JSON.stringify({ + chat_id: "test", + seq: "4", + type: "stream_finished", + message_id: "msg-1", + finish_reason: "stop", + })}\n\n`, + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 20)); + + expect(events).toHaveLength(4); + expect(events[0].type).toBe("stream_started"); + expect(events[1].type).toBe("stream_delta"); + expect(events[2].type).toBe("stream_delta"); + expect(events[3].type).toBe("stream_finished"); + }); +}); + +describe("DeltaOp Application - merge_extra", () => { + it("should merge extra fields into message.extra", () => { + const message: ChatMessage = { + role: "assistant", + content: "test", + message_id: "msg-1", + }; + + const ops: DeltaOp[] = [{ op: "merge_extra", extra: { metering_a: 100 } }]; + + const result = applyDeltaOps(message, ops) as any; + expect(result.extra).toEqual({ metering_a: 100 }); + }); + + it("should merge multiple extra fields incrementally", () => { + const message: ChatMessage = { + role: "assistant", + content: "test", + message_id: "msg-1", + }; + + const ops: DeltaOp[] = [ + { op: "merge_extra", extra: { metering_a: 100 } }, + { op: "merge_extra", extra: { metering_b: 200 } }, + { op: "merge_extra", extra: { metering_a: 150 } }, + ]; + + const result = applyDeltaOps(message, ops) as any; + expect(result.extra).toEqual({ metering_a: 150, metering_b: 200 }); + }); +}); diff --git a/refact-agent/gui/src/__tests__/chatSSEProtocolCornerCases.test.ts b/refact-agent/gui/src/__tests__/chatSSEProtocolCornerCases.test.ts new file mode 100644 index 000000000..28d8c4582 --- /dev/null +++ b/refact-agent/gui/src/__tests__/chatSSEProtocolCornerCases.test.ts @@ -0,0 +1,560 @@ +/** + * SSE Protocol Corner Cases Tests + * + * Tests chunking, sequence gaps, disconnects, and message variations + * + * Run with: npm run test:no-watch 
-- chatSSEProtocolCornerCases + */ + +/* eslint-disable @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/require-await, @typescript-eslint/ban-ts-comment */ +// @ts-nocheck - Testing runtime behavior +import { describe, it, expect, vi, beforeEach } from "vitest"; +import { subscribeToChatEvents } from "../services/refact/chatSubscription"; + +const createMockReader = (chunks: Uint8Array[]) => { + let index = 0; + return { + read: vi.fn(async () => { + if (index >= chunks.length) { + return { done: true, value: undefined }; + } + return { done: false, value: chunks[index++] }; + }), + }; +}; + +const createMockFetch = (chunks: Uint8Array[]) => { + return vi.fn().mockResolvedValue({ + ok: true, + body: { + getReader: () => createMockReader(chunks), + }, + }); +}; + +describe("SSE Protocol - Chunking Corner Cases", () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it("should handle JSON split across chunks", async () => { + const encoder = new TextEncoder(); + const fullEvent = `data: ${JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + })}\n\n`; + + const chunk1 = encoder.encode(fullEvent.substring(0, 30)); + const chunk2 = encoder.encode(fullEvent.substring(30)); + + const events: any[] = []; + const mockFetch = createMockFetch([chunk1, chunk2]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(1); + expect(events[0].type).toBe("pause_cleared"); + }); + + it("should handle delimiter split across chunks", async () => { + const encoder = new TextEncoder(); + const event = JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + }); + + const chunk1 = encoder.encode(`data: ${event}\n`); + const chunk2 = encoder.encode(`\n`); + + const events: any[] = []; + const mockFetch = createMockFetch([chunk1, chunk2]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(1); + expect(events[0].type).toBe("pause_cleared"); + }); + + it("should handle CRLF split across chunks", async () => { + const encoder = new TextEncoder(); + const event = JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + }); + + const chunk1 = encoder.encode(`data: ${event}\r`); + const chunk2 = encoder.encode(`\n\r\n`); + + const events: any[] = []; + const mockFetch = createMockFetch([chunk1, chunk2]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(1); + expect(events[0].type).toBe("pause_cleared"); + }); + + it("should handle CR-only line endings", async () => { + const encoder = new TextEncoder(); + const event = JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + }); + + const chunk = encoder.encode(`data: ${event}\r\r`); + + const events: any[] = []; + const mockFetch = createMockFetch([chunk]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(1); + 
expect(events[0].type).toBe("pause_cleared"); + }); + + it("should handle multiple events in one chunk", async () => { + const encoder = new TextEncoder(); + const event1 = JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + }); + const event2 = JSON.stringify({ + chat_id: "test", + seq: "2", + type: "pause_cleared", + }); + + const chunk = encoder.encode(`data: ${event1}\n\ndata: ${event2}\n\n`); + + const events: any[] = []; + const mockFetch = createMockFetch([chunk]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(2); + expect(events[0].seq).toBe("1"); + expect(events[1].seq).toBe("2"); + }); + + it("should handle empty lines between events", async () => { + const encoder = new TextEncoder(); + const event = JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + }); + + const chunk = encoder.encode(`\n\ndata: ${event}\n\n\n\n`); + + const events: any[] = []; + const mockFetch = createMockFetch([chunk]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events).toHaveLength(1); + expect(events[0].type).toBe("pause_cleared"); + }); + + it("should handle large payload across many chunks", async () => { + const encoder = new TextEncoder(); + const largeContent = "x".repeat(10000); + const event = JSON.stringify({ + chat_id: "test", + seq: "1", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "append_content", text: largeContent }], + }); + const fullEvent = `data: ${event}\n\n`; + + const chunkSize = 100; + const chunks: Uint8Array[] = []; + for (let i = 0; i < fullEvent.length; i += chunkSize) { + chunks.push(encoder.encode(fullEvent.substring(i, i + chunkSize))); + } + + const events: any[] = []; + const mockFetch = createMockFetch(chunks); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 20)); + + expect(events).toHaveLength(1); + expect(events[0].type).toBe("stream_delta"); + expect(events[0].ops[0].text).toBe(largeContent); + }); +}); + +describe("SSE Protocol - Message Variations", () => { + it("should handle context_file message in snapshot", async () => { + const encoder = new TextEncoder(); + const snapshot = { + chat_id: "test", + seq: "0", + type: "snapshot", + thread: { + id: "test", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [ + { + role: "context_file", + content: [ + { + file_name: "test.ts", + file_content: "console.log('test');", + line1: 1, + line2: 1, + }, + ], + }, + ], + }; + + const events: any[] = []; + const mockFetch = createMockFetch([ + encoder.encode(`data: ${JSON.stringify(snapshot)}\n\n`), + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].messages).toHaveLength(1); + 
expect(events[0].messages[0].role).toBe("context_file"); + expect(Array.isArray(events[0].messages[0].content)).toBe(true); + }); + + it("should handle assistant message with all optional fields", async () => { + const encoder = new TextEncoder(); + const snapshot = { + chat_id: "test", + seq: "0", + type: "snapshot", + thread: { + id: "test", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [ + { + role: "assistant", + content: "Test response", + message_id: "msg-1", + reasoning_content: "Let me think...", + thinking_blocks: [{ thinking: "Step 1", signature: "sig1" }], + citations: [{ url: "http://example.com", title: "Example" }], + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + }, + extra: { custom_field: "value" }, + finish_reason: "stop", + }, + ], + }; + + const events: any[] = []; + const mockFetch = createMockFetch([ + encoder.encode(`data: ${JSON.stringify(snapshot)}\n\n`), + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + const msg = events[0].messages[0]; + expect(msg.reasoning_content).toBe("Let me think..."); + expect(msg.thinking_blocks).toHaveLength(1); + expect(msg.citations).toHaveLength(1); + expect(msg.usage.total_tokens).toBe(150); + expect(msg.extra.custom_field).toBe("value"); + }); + + it("should handle tool message with tool_failed variations", async () => { + const encoder = new TextEncoder(); + + for (const toolFailed of [true, false, null, undefined]) { + const snapshot = { + chat_id: "test", + seq: "0", + type: "snapshot", + thread: { + id: "test", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [ + { + role: "tool", + content: "Result", + message_id: "msg-1", + tool_call_id: "call_1", + tool_failed: toolFailed, + }, + ], + }; + + const events: any[] = []; + const mockFetch = createMockFetch([ + encoder.encode(`data: ${JSON.stringify(snapshot)}\n\n`), + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(events[0].messages[0].tool_failed).toBe(toolFailed); + } + }); + + it("should handle multimodal tool message content", async () => { + const encoder = new TextEncoder(); + const snapshot = { + chat_id: "test", + seq: "0", + type: "snapshot", + thread: { + id: "test", + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + }, + messages: [ + { + role: "tool", + content: [ + { m_type: "text", m_content: "Result text" }, + { m_type: "image/png", m_content: "base64data..." 
}, + ], + message_id: "msg-1", + tool_call_id: "call_1", + }, + ], + }; + + const events: any[] = []; + const mockFetch = createMockFetch([ + encoder.encode(`data: ${JSON.stringify(snapshot)}\n\n`), + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + const content = events[0].messages[0].content; + expect(Array.isArray(content)).toBe(true); + expect(content[0].m_type).toBe("text"); + expect(content[1].m_type).toBe("image/png"); + }); +}); + +describe("SSE Protocol - Disconnect Handling", () => { + it("should call onDisconnected on normal EOF", async () => { + const onDisconnected = vi.fn(); + const encoder = new TextEncoder(); + + const mockFetch = createMockFetch([ + encoder.encode( + `data: ${JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + })}\n\n`, + ), + ]); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: vi.fn(), + onError: vi.fn(), + onDisconnected, + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(onDisconnected).toHaveBeenCalled(); + }); + + it("should call onError on fetch error", async () => { + const onError = vi.fn(); + + const mockFetch = vi.fn().mockRejectedValue(new Error("Network error")); + global.fetch = mockFetch; + + subscribeToChatEvents("test", 8001, { + onEvent: vi.fn(), + onError, + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(onError).toHaveBeenCalled(); + }); + + it("should not call onDisconnected on abort", async () => { + const onDisconnected = vi.fn(); + const encoder = new TextEncoder(); + + const _abortFn: (() => void) | null = null; + const mockFetch = vi.fn().mockImplementation((url, options) => { + const abortController = options.signal; + + return Promise.resolve({ + ok: true, + body: { + getReader: () => ({ + read: vi.fn().mockImplementation(async () => { + if (abortController.aborted) { + throw new DOMException("Aborted", "AbortError"); + } + await new Promise((resolve) => setTimeout(resolve, 100)); + return { + done: false, + value: encoder.encode( + `data: ${JSON.stringify({ + chat_id: "test", + seq: "1", + type: "pause_cleared", + })}\n\n`, + ), + }; + }), + }), + }, + }); + }); + global.fetch = mockFetch; + + const unsubscribe = subscribeToChatEvents("test", 8001, { + onEvent: vi.fn(), + onError: vi.fn(), + onDisconnected, + }); + + await new Promise((resolve) => setTimeout(resolve, 5)); + unsubscribe(); + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(onDisconnected).not.toHaveBeenCalled(); + }); +}); diff --git a/refact-agent/gui/src/__tests__/chatStreamingLargeHistory.stress.test.ts b/refact-agent/gui/src/__tests__/chatStreamingLargeHistory.stress.test.ts new file mode 100644 index 000000000..e2ae23825 --- /dev/null +++ b/refact-agent/gui/src/__tests__/chatStreamingLargeHistory.stress.test.ts @@ -0,0 +1,224 @@ +import { describe, it, expect, vi, beforeEach } from "vitest"; +import { chatReducer } from "../features/Chat/Thread/reducer"; +import { newChatAction, applyChatEvent } from "../features/Chat/Thread/actions"; +import type { Chat } from "../features/Chat/Thread/types"; +import { + subscribeToChatEvents, + type ChatEventEnvelope, + type EventEnvelope, +} from "../services/refact/chatSubscription"; +import type { ChatMessage } from "../services/refact/types"; + +function createSnapshotEvent( + chatId: string, + messages: ChatMessage[], + seq = "1", +): 
ChatEventEnvelope { + return { + chat_id: chatId, + seq, + type: "snapshot", + thread: { + id: chatId, + title: "Stress Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages, + }; +} + +function createMockReader(chunks: Uint8Array[]) { + let index = 0; + return { + read: vi.fn(() => { + if (index >= chunks.length) { + return Promise.resolve({ done: true, value: undefined }); + } + return Promise.resolve({ done: false, value: chunks[index++] }); + }), + }; +} + +function createMockFetch(chunks: Uint8Array[]) { + return vi.fn().mockResolvedValue({ + ok: true, + body: { + getReader: () => createMockReader(chunks), + }, + }); +} + +describe("Chat Streaming + Large History Stress", () => { + let initialState: Chat; + let chatId: string; + + beforeEach(() => { + vi.clearAllMocks(); + const emptyState = chatReducer(undefined, { type: "@@INIT" }); + initialState = chatReducer(emptyState, newChatAction(undefined)); + chatId = initialState.current_thread_id; + }); + + it("handles large history plus many stream deltas", () => { + const historySize = 1200; + const chunkCount = 1500; + const chunkText = "abcdefghijklmnopqrstuvwxyz"; + + const messages: ChatMessage[] = Array.from( + { length: historySize }, + (_, i) => + i % 2 === 0 + ? { + role: "user", + content: `user-${i}`, + message_id: `u-${i}`, + } + : { + role: "assistant", + content: `assistant-${i}`, + message_id: `a-${i}`, + }, + ); + + const snapshot = createSnapshotEvent(chatId, messages); + let state = chatReducer(initialState, applyChatEvent(snapshot)); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "stream-stress", + }), + ); + + const startedAt = Date.now(); + for (let i = 0; i < chunkCount; i++) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: String(i + 3), + type: "stream_delta", + message_id: "stream-stress", + ops: [{ op: "append_content", text: chunkText }], + }), + ); + } + const elapsedMs = Date.now() - startedAt; + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: String(chunkCount + 3), + type: "stream_finished", + message_id: "stream-stress", + finish_reason: "stop", + }), + ); + + const runtime = state.threads[chatId]; + if (!runtime) throw new Error(`Runtime not found for chat ${chatId}`); + const finalMessage = + runtime.thread.messages[runtime.thread.messages.length - 1]; + + expect(runtime.thread.messages).toHaveLength(historySize + 1); + expect(finalMessage.role).toBe("assistant"); + expect(finalMessage.content).toBe(chunkText.repeat(chunkCount)); + expect(runtime.streaming).toBe(false); + expect(elapsedMs).toBeLessThan(10_000); + }); + + it("keeps reducer stable under many duplicate seq events", () => { + const snapshot = createSnapshotEvent(chatId, [ + { role: "user", content: "hello", message_id: "u1" }, + ]); + let state = chatReducer(initialState, applyChatEvent(snapshot)); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-dup", + }), + ); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-dup", + ops: [{ op: "append_content", text: "base" }], + 
}), + ); + + for (let i = 0; i < 1000; i++) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-dup", + ops: [{ op: "append_content", text: "_duplicate_should_not_apply" }], + }), + ); + } + + const runtime = state.threads[chatId]; + if (!runtime) throw new Error(`Runtime not found for chat ${chatId}`); + const finalMessage = + runtime.thread.messages[runtime.thread.messages.length - 1]; + expect(finalMessage.content).toBe("base"); + expect(runtime.last_applied_seq).toBe("3"); + }); + + it("parses many SSE events in a single payload", async () => { + const eventCount = 2000; + const encoder = new TextEncoder(); + const events: EventEnvelope[] = []; + + const payload = Array.from({ length: eventCount }, (_, i) => { + const event: EventEnvelope = { + chat_id: "stress-chat", + seq: String(i + 1), + type: "pause_cleared", + }; + return `data: ${JSON.stringify(event)}\n\n`; + }).join(""); + + global.fetch = createMockFetch([encoder.encode(payload)]); + + subscribeToChatEvents("stress-chat", 8001, { + onEvent: (e) => events.push(e), + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 40)); + + expect(events).toHaveLength(eventCount); + expect(events[0].seq).toBe("1"); + expect(events[eventCount - 1].seq).toBe(String(eventCount)); + }); +}); diff --git a/refact-agent/gui/src/__tests__/chatStreamingMultiChat.stress.test.ts b/refact-agent/gui/src/__tests__/chatStreamingMultiChat.stress.test.ts new file mode 100644 index 000000000..12a083851 --- /dev/null +++ b/refact-agent/gui/src/__tests__/chatStreamingMultiChat.stress.test.ts @@ -0,0 +1,456 @@ +import { describe, it, expect, beforeEach } from "vitest"; +import { chatReducer } from "../features/Chat/Thread/reducer"; +import { newChatAction, applyChatEvent } from "../features/Chat/Thread/actions"; +import type { Chat } from "../features/Chat/Thread/types"; +import type { ChatEventEnvelope } from "../services/refact/chatSubscription"; +import type { ChatMessage } from "../services/refact/types"; + +function createSnapshotEvent( + chatId: string, + messages: ChatMessage[], + seq = "1", +): ChatEventEnvelope { + return { + chat_id: chatId, + seq, + type: "snapshot", + thread: { + id: chatId, + title: "Stress Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages, + }; +} + +function makeHistory(count: number): ChatMessage[] { + return Array.from({ length: count }, (_, i) => + i % 2 === 0 + ? { role: "user", content: `user-${i}`, message_id: `u-${i}` } + : { role: "assistant", content: `assistant-${i}`, message_id: `a-${i}` }, + ); +} + +describe("Multi-Chat Streaming Stress Tests", () => { + let baseState: Chat; + + beforeEach(() => { + const emptyState = chatReducer(undefined, { type: "@@INIT" }); + baseState = chatReducer(emptyState, newChatAction(undefined)); + }); + + it("handles 3 concurrent streaming chats without data loss", () => { + const CHAT_COUNT = 3; + const HISTORY_SIZE = 1200; + const CHUNKS_PER_CHAT = 500; + const CHUNK_TEXT = "Hello world streaming text. 
"; + + const chatIds: string[] = []; + let state = baseState; + + for (let c = 0; c < CHAT_COUNT; c++) { + state = chatReducer(state, newChatAction(undefined)); + chatIds.push(state.current_thread_id); + } + + for (const chatId of chatIds) { + const snapshot = createSnapshotEvent(chatId, makeHistory(HISTORY_SIZE)); + state = chatReducer(state, applyChatEvent(snapshot)); + } + + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: `stream-${chatId}`, + }), + ); + } + + for (const chatId of chatIds) { + const rt = state.threads[chatId]; + if (!rt) throw new Error(`Runtime not found for chat ${chatId}`); + expect(rt.streaming).toBe(true); + expect(rt.waiting_for_response).toBe(true); + } + + const startedAt = Date.now(); + + for (let i = 0; i < CHUNKS_PER_CHAT; i++) { + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: String(i + 3), + type: "stream_delta", + message_id: `stream-${chatId}`, + ops: [{ op: "append_content", text: CHUNK_TEXT }], + }), + ); + } + } + + const streamElapsedMs = Date.now() - startedAt; + + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: String(CHUNKS_PER_CHAT + 3), + type: "stream_finished", + message_id: `stream-${chatId}`, + finish_reason: "stop", + }), + ); + } + + for (const chatId of chatIds) { + const rt = state.threads[chatId]; + if (!rt) throw new Error(`Runtime not found for chat ${chatId}`); + const msgs = rt.thread.messages; + expect(msgs).toHaveLength(HISTORY_SIZE + 1); + + const lastMsg = msgs[msgs.length - 1]; + expect(lastMsg.role).toBe("assistant"); + expect(lastMsg.content).toBe(CHUNK_TEXT.repeat(CHUNKS_PER_CHAT)); + + expect(rt.streaming).toBe(false); + expect(rt.waiting_for_response).toBe(false); + expect(rt.snapshot_received).toBe(true); + } + + expect(streamElapsedMs).toBeLessThan(15_000); + }); + + it("handles interleaved deltas with reasoning + tool_calls across 3 chats", () => { + const CHAT_COUNT = 3; + const HISTORY_SIZE = 200; + const CHUNKS = 100; + + const chatIds: string[] = []; + let state = baseState; + + for (let c = 0; c < CHAT_COUNT; c++) { + state = chatReducer(state, newChatAction(undefined)); + chatIds.push(state.current_thread_id); + } + + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent(createSnapshotEvent(chatId, makeHistory(HISTORY_SIZE))), + ); + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: `stream-${chatId}`, + }), + ); + } + + for (let i = 0; i < CHUNKS; i++) { + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: String(i * 2 + 3), + type: "stream_delta", + message_id: `stream-${chatId}`, + ops: [ + { op: "append_content", text: `c${i} ` }, + { op: "append_reasoning", text: `r${i} ` }, + ], + }), + ); + + if (i === CHUNKS - 1) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: String(i * 2 + 4), + type: "stream_delta", + message_id: `stream-${chatId}`, + ops: [ + { + op: "set_tool_calls", + tool_calls: [ + { + id: `tc-${chatId}`, + type: "function", + function: { + name: "cat", + arguments: '{"paths":"test.ts"}', + }, + }, + ], + }, + ], + }), + ); + } + } + } + + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: String(CHUNKS * 2 + 5), + type: 
"stream_finished", + message_id: `stream-${chatId}`, + finish_reason: "tool_calls", + }), + ); + } + + for (const chatId of chatIds) { + const rt = state.threads[chatId]; + if (!rt) throw new Error(`Runtime not found for chat ${chatId}`); + const lastMsg = rt.thread.messages[rt.thread.messages.length - 1]; + + const expectedContent = Array.from( + { length: CHUNKS }, + (_, i) => `c${i} `, + ).join(""); + expect(lastMsg.content).toBe(expectedContent); + + if ("reasoning_content" in lastMsg) { + const expectedReasoning = Array.from( + { length: CHUNKS }, + (_, i) => `r${i} `, + ).join(""); + expect(lastMsg.reasoning_content).toBe(expectedReasoning); + } + + if ("tool_calls" in lastMsg && lastMsg.tool_calls) { + expect(lastMsg.tool_calls).toHaveLength(1); + expect(lastMsg.tool_calls[0].id).toBe(`tc-${chatId}`); + } + } + }); + + it("handles large batched ops (coalesced deltas) correctly", () => { + let state = baseState; + state = chatReducer(state, newChatAction(undefined)); + const chatId = state.current_thread_id; + + state = chatReducer( + state, + applyChatEvent(createSnapshotEvent(chatId, makeHistory(100))), + ); + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "stream-batch", + }), + ); + + const batchedOps = Array.from({ length: 200 }, (_, i) => ({ + op: "append_content" as const, + text: `chunk${i}-`, + })); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "stream-batch", + ops: batchedOps, + }), + ); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "4", + type: "stream_finished", + message_id: "stream-batch", + finish_reason: "stop", + }), + ); + + const rt = state.threads[chatId]; + if (!rt) throw new Error(`Runtime not found for chat ${chatId}`); + const lastMsg = rt.thread.messages[rt.thread.messages.length - 1]; + const expectedContent = Array.from( + { length: 200 }, + (_, i) => `chunk${i}-`, + ).join(""); + expect(lastMsg.content).toBe(expectedContent); + }); + + it("correctly skips duplicate seq events across all 3 chats", () => { + const CHAT_COUNT = 3; + const chatIds: string[] = []; + let state = baseState; + + for (let c = 0; c < CHAT_COUNT; c++) { + state = chatReducer(state, newChatAction(undefined)); + chatIds.push(state.current_thread_id); + } + + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent( + createSnapshotEvent(chatId, [ + { role: "user", content: "hi", message_id: "u1" }, + ]), + ), + ); + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: `s-${chatId}`, + }), + ); + } + + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: `s-${chatId}`, + ops: [{ op: "append_content", text: "real" }], + }), + ); + + for (let dup = 0; dup < 50; dup++) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: `s-${chatId}`, + ops: [{ op: "append_content", text: "_dup" }], + }), + ); + } + } + + for (const chatId of chatIds) { + const rt = state.threads[chatId]; + if (!rt) throw new Error(`Runtime not found for chat ${chatId}`); + const lastMsg = rt.thread.messages[rt.thread.messages.length - 1]; + expect(lastMsg.content).toBe("real"); + expect(rt.last_applied_seq).toBe("3"); + } + }); + + it("handles snapshot mid-stream (reconnect scenario) for 
one of 3 chats", () => { + const CHAT_COUNT = 3; + const chatIds: string[] = []; + let state = baseState; + + for (let c = 0; c < CHAT_COUNT; c++) { + state = chatReducer(state, newChatAction(undefined)); + chatIds.push(state.current_thread_id); + } + + for (const chatId of chatIds) { + state = chatReducer( + state, + applyChatEvent(createSnapshotEvent(chatId, makeHistory(50))), + ); + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: `s-${chatId}`, + }), + ); + for (let i = 0; i < 10; i++) { + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: String(i + 3), + type: "stream_delta", + message_id: `s-${chatId}`, + ops: [{ op: "append_content", text: "x" }], + }), + ); + } + } + + const reconnectChatId = chatIds[1]; + const freshMessages: ChatMessage[] = [ + ...makeHistory(50), + { + role: "assistant", + content: "full recovered content", + message_id: `s-${reconnectChatId}`, + }, + ]; + + const reconnectSnapshot = createSnapshotEvent( + reconnectChatId, + freshMessages, + "0", + ); + if (reconnectSnapshot.type === "snapshot") { + reconnectSnapshot.runtime.state = "generating"; + } + state = chatReducer(state, applyChatEvent(reconnectSnapshot)); + + const reconnectedRt = state.threads[reconnectChatId]; + if (!reconnectedRt) + throw new Error(`Runtime not found for chat ${reconnectChatId}`); + expect(reconnectedRt.thread.messages).toHaveLength(51); + expect( + reconnectedRt.thread.messages[reconnectedRt.thread.messages.length - 1] + .content, + ).toBe("full recovered content"); + expect(reconnectedRt.streaming).toBe(true); + + for (const chatId of chatIds) { + if (chatId === reconnectChatId) continue; + const rt = state.threads[chatId]; + if (!rt) throw new Error(`Runtime not found for chat ${chatId}`); + expect(rt.streaming).toBe(true); + const lastMsg = rt.thread.messages[rt.thread.messages.length - 1]; + expect(lastMsg.content).toBe("x".repeat(10)); + } + }); +}); diff --git a/refact-agent/gui/src/__tests__/chatSubscription.test.ts b/refact-agent/gui/src/__tests__/chatSubscription.test.ts new file mode 100644 index 000000000..d3ec16fe0 --- /dev/null +++ b/refact-agent/gui/src/__tests__/chatSubscription.test.ts @@ -0,0 +1,263 @@ +/** + * Chat Subscription Service Tests + * + * Tests for the fetch-based SSE chat subscription system. 
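+ * Covers applyDeltaOps (pure delta application) and subscribeToChatEvents (fetch-based SSE transport).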
+ * + * Run with: npm run test:no-watch -- chatSubscription + */ + +/* eslint-disable @typescript-eslint/require-await */ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import { + subscribeToChatEvents, + applyDeltaOps, + type DeltaOp, +} from "../services/refact/chatSubscription"; +import type { AssistantMessage } from "../services/refact/types"; + +type TestMessage = AssistantMessage & { + reasoning_content?: string; + thinking_blocks?: unknown[]; + citations?: unknown[]; + usage?: unknown; +}; + +const mockFetch = vi.fn(); + +describe("chatSubscription", () => { + describe("applyDeltaOps", () => { + it("should append content to string content", () => { + const message: TestMessage = { + role: "assistant", + content: "Hello", + }; + + const ops: DeltaOp[] = [{ op: "append_content", text: " world" }]; + + const result = applyDeltaOps(message, ops) as TestMessage; + expect(result.content).toBe("Hello world"); + }); + + it("should initialize content if not a string", () => { + const message: TestMessage = { + role: "assistant", + content: undefined as unknown as string, + }; + + const ops: DeltaOp[] = [{ op: "append_content", text: "Hello" }]; + + const result = applyDeltaOps(message, ops) as TestMessage; + expect(result.content).toBe("Hello"); + }); + + it("should append reasoning content", () => { + const message: TestMessage = { + role: "assistant", + content: "", + reasoning_content: "Step 1: ", + }; + + const ops: DeltaOp[] = [{ op: "append_reasoning", text: "analyze" }]; + + const result = applyDeltaOps(message, ops) as TestMessage; + expect(result.reasoning_content).toBe("Step 1: analyze"); + }); + + it("should initialize reasoning content if empty", () => { + const message: TestMessage = { + role: "assistant", + content: "", + }; + + const ops: DeltaOp[] = [{ op: "append_reasoning", text: "thinking" }]; + + const result = applyDeltaOps(message, ops) as TestMessage; + expect(result.reasoning_content).toBe("thinking"); + }); + + it("should set tool calls", () => { + const message: TestMessage = { + role: "assistant", + content: "", + }; + + const toolCalls = [ + { id: "call_1", function: { name: "test", arguments: "{}" } }, + ]; + const ops: DeltaOp[] = [{ op: "set_tool_calls", tool_calls: toolCalls }]; + + const result = applyDeltaOps(message, ops) as TestMessage; + expect(result.tool_calls).toEqual(toolCalls); + }); + + it("should set thinking blocks", () => { + const message: TestMessage = { + role: "assistant", + content: "", + }; + + const blocks = [{ thinking: "reasoning here" }]; + const ops: DeltaOp[] = [{ op: "set_thinking_blocks", blocks }]; + + const result = applyDeltaOps(message, ops) as TestMessage; + expect(result.thinking_blocks).toEqual(blocks); + }); + + it("should add citations", () => { + const message: TestMessage = { + role: "assistant", + content: "", + }; + + const citation1 = { url: "http://example.com/1" }; + const citation2 = { url: "http://example.com/2" }; + const ops: DeltaOp[] = [ + { op: "add_citation", citation: citation1 }, + { op: "add_citation", citation: citation2 }, + ]; + + const result = applyDeltaOps(message, ops) as TestMessage; + expect(result.citations).toEqual([citation1, citation2]); + }); + + it("should set usage", () => { + const message: TestMessage = { + role: "assistant", + content: "", + }; + + const usage = { prompt_tokens: 100, completion_tokens: 50 }; + const ops: DeltaOp[] = [{ op: "set_usage", usage }]; + + const result = applyDeltaOps(message, ops) as TestMessage; + 
expect(result.usage).toEqual(usage); + }); + + it("should apply multiple ops in sequence", () => { + const message: TestMessage = { + role: "assistant", + content: "", + }; + + const ops: DeltaOp[] = [ + { op: "append_content", text: "Hello" }, + { op: "append_content", text: " " }, + { op: "append_content", text: "world" }, + { op: "append_reasoning", text: "thinking..." }, + { + op: "set_tool_calls", + tool_calls: [ + { id: "1", function: { name: "test", arguments: "{}" } }, + ], + }, + ]; + + const result = applyDeltaOps(message, ops) as TestMessage; + expect(result.content).toBe("Hello world"); + expect(result.reasoning_content).toBe("thinking..."); + expect(result.tool_calls).toHaveLength(1); + }); + }); + + describe("subscribeToChatEvents", () => { + beforeEach(() => { + global.fetch = mockFetch; + mockFetch.mockReset(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it("should make fetch request with correct URL and headers", () => { + const chatId = "test-chat-123"; + const port = 8001; + const apiKey = "test-key"; + + mockFetch.mockResolvedValueOnce({ + ok: true, + body: { + getReader: () => ({ + read: vi.fn().mockResolvedValue({ done: true }), + }), + }, + }); + + subscribeToChatEvents( + chatId, + port, + { + onEvent: vi.fn(), + onError: vi.fn(), + }, + apiKey, + ); + + expect(mockFetch).toHaveBeenCalledWith( + `http://127.0.0.1:${port}/v1/chats/subscribe?chat_id=${chatId}`, + expect.objectContaining({ + method: "GET", + headers: { Authorization: "Bearer test-key" }, + }), + ); + }); + + it("should normalize CRLF line endings", async () => { + const onEvent = vi.fn(); + const encoder = new TextEncoder(); + + const events = + 'data: {"type":"snapshot","seq":"1","chat_id":"test"}\r\n\r\n'; + + mockFetch.mockResolvedValueOnce({ + ok: true, + body: { + getReader: () => { + let called = false; + return { + read: async () => { + if (called) return { done: true, value: undefined }; + called = true; + return { done: false, value: encoder.encode(events) }; + }, + }; + }, + }, + }); + + subscribeToChatEvents("test", 8001, { + onEvent, + onError: vi.fn(), + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(onEvent).toHaveBeenCalledWith( + expect.objectContaining({ type: "snapshot" }), + ); + }); + + it("should call onDisconnected on normal stream close", async () => { + const onDisconnected = vi.fn(); + + mockFetch.mockResolvedValueOnce({ + ok: true, + body: { + getReader: () => ({ + read: vi.fn().mockResolvedValue({ done: true }), + }), + }, + }); + + subscribeToChatEvents("test", 8001, { + onEvent: vi.fn(), + onError: vi.fn(), + onDisconnected, + }); + + await new Promise((resolve) => setTimeout(resolve, 10)); + + expect(onDisconnected).toHaveBeenCalled(); + }); + }); +}); diff --git a/refact-agent/gui/src/__tests__/chatValidation.test.ts b/refact-agent/gui/src/__tests__/chatValidation.test.ts new file mode 100644 index 000000000..4ec433a75 --- /dev/null +++ b/refact-agent/gui/src/__tests__/chatValidation.test.ts @@ -0,0 +1,166 @@ +/* eslint-disable @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-argument */ +import { describe, test, expect } from "vitest"; +import { isLspChatMessage } from "../services/refact/chat"; +import { applyDeltaOps } from "../services/refact/chatSubscription"; +import type { ChatMessage } from "../services/refact/types"; + +describe("Chat Validation Fixes", () => { + describe("isLspChatMessage - tool messages", () => { + 
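// Tool results may be a plain string or a multimodal array; tool_call_id is required in both cases. +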
test("accepts tool message with string content", () => { + const msg = { + role: "tool", + tool_call_id: "call_123", + content: "Tool result text", + }; + expect(isLspChatMessage(msg)).toBe(true); + }); + + test("accepts tool message with array content", () => { + const msg = { + role: "tool", + tool_call_id: "call_123", + content: [ + { m_type: "text", m_content: "Result text" }, + { m_type: "image/png", m_content: "base64data" }, + ], + }; + expect(isLspChatMessage(msg)).toBe(true); + }); + + test("rejects tool message without tool_call_id", () => { + const msg = { + role: "tool", + content: "Some text", + }; + expect(isLspChatMessage(msg)).toBe(false); + }); + }); + + describe("isLspChatMessage - diff messages", () => { + test("accepts diff message with array content", () => { + const msg = { + role: "diff", + content: [ + { + file_name: "test.ts", + file_action: "M", + line1: 1, + line2: 10, + chunks: "diff content", + }, + ], + }; + expect(isLspChatMessage(msg)).toBe(true); + }); + + test("rejects diff message with non-array content", () => { + const msg = { + role: "diff", + content: "not an array", + }; + expect(isLspChatMessage(msg)).toBe(false); + }); + }); + + describe("isLspChatMessage - multimodal user messages", () => { + test("accepts user message with array content", () => { + const msg = { + role: "user", + content: [ + { type: "text", text: "What is this?" }, + { + type: "image_url", + image_url: { url: "data:image/png;base64,..." }, + }, + ], + }; + expect(isLspChatMessage(msg)).toBe(true); + }); + }); + + describe("isLspChatMessage - standard messages", () => { + test("accepts assistant message with null content", () => { + const msg = { + role: "assistant", + content: null, + tool_calls: [ + { + id: "call_1", + function: { name: "test", arguments: "{}" }, + index: 0, + }, + ], + }; + expect(isLspChatMessage(msg)).toBe(true); + }); + + test("accepts assistant message with string content", () => { + const msg = { + role: "assistant", + content: "Hello world", + }; + expect(isLspChatMessage(msg)).toBe(true); + }); + }); +}); + +describe("applyDeltaOps - merge_extra", () => { + test("merges extra fields into message", () => { + const message: ChatMessage = { + role: "assistant", + content: "test", + message_id: "msg_1", + }; + + const result = applyDeltaOps(message, [ + { op: "merge_extra", extra: { custom_field: "value1" } }, + ]); + + expect(result).toHaveProperty("extra"); + expect((result as any).extra.custom_field).toBe("value1"); + }); + + test("preserves existing extra fields when merging", () => { + const message: ChatMessage = { + role: "assistant", + content: "test", + message_id: "msg_1", + extra: { existing: "kept" }, + } as any; + + const result = applyDeltaOps(message, [ + { op: "merge_extra", extra: { new_field: "added" } }, + ]); + + expect((result as any).extra.existing).toBe("kept"); + expect((result as any).extra.new_field).toBe("added"); + }); + + test("overwrites existing extra fields with same key", () => { + const message: ChatMessage = { + role: "assistant", + content: "test", + message_id: "msg_1", + extra: { field: "old" }, + } as any; + + const result = applyDeltaOps(message, [ + { op: "merge_extra", extra: { field: "new" } }, + ]); + + expect((result as any).extra.field).toBe("new"); + }); + + test("handles unknown delta ops gracefully", () => { + const message: ChatMessage = { + role: "assistant", + content: "test", + message_id: "msg_1", + }; + + const result = applyDeltaOps(message, [{ op: "unknown_op" } as any]); + + 
expect(result).toBeDefined(); + expect(result.content).toBe("test"); + }); +}); diff --git a/refact-agent/gui/src/__tests__/coinBalanceSlice.test.ts b/refact-agent/gui/src/__tests__/coinBalanceSlice.test.ts new file mode 100644 index 000000000..4cf7fdba7 --- /dev/null +++ b/refact-agent/gui/src/__tests__/coinBalanceSlice.test.ts @@ -0,0 +1,162 @@ +import { describe, it, expect } from "vitest"; +import { configureStore } from "@reduxjs/toolkit"; +import { + coinBallanceSlice, + selectBalance, +} from "../features/CoinBalance/coinBalanceSlice"; +import { applyChatEvent } from "../features/Chat/Thread/actions"; +import type { ChatEventEnvelope } from "../services/refact/chatSubscription"; + +function createTestStore() { + return configureStore({ + reducer: { + coins: coinBallanceSlice.reducer, + }, + }); +} + +describe("coinBalanceSlice", () => { + describe("extractMeteringBalance from SSE events", () => { + it("should extract metering_balance from stream_delta merge_extra ops", () => { + const store = createTestStore(); + + const event: ChatEventEnvelope = { + chat_id: "test-chat", + seq: "1", + type: "stream_delta", + message_id: "msg-1", + ops: [ + { op: "append_content", text: "Hello" }, + { op: "merge_extra", extra: { metering_balance: 2553194 } }, + ], + }; + store.dispatch(applyChatEvent(event)); + + expect(selectBalance({ coins: store.getState().coins })).toBe(2553194); + }); + + it("should extract metering_balance from stream_delta merge_extra (top level simulation)", () => { + const store = createTestStore(); + + // metering_balance comes via merge_extra op, not top-level event field + const event: ChatEventEnvelope = { + chat_id: "test-chat", + seq: "1", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "merge_extra", extra: { metering_balance: 1000000 } }], + }; + store.dispatch(applyChatEvent(event)); + + expect(selectBalance({ coins: store.getState().coins })).toBe(1000000); + }); + + it("should extract metering_balance from stream_delta with usage in merge_extra", () => { + const store = createTestStore(); + + // Usage with metering_balance comes via merge_extra + const event: ChatEventEnvelope = { + chat_id: "test-chat", + seq: "1", + type: "stream_delta", + message_id: "msg-1", + ops: [ + { + op: "merge_extra", + extra: { + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + }, + metering_balance: 999999, + }, + }, + ], + }; + store.dispatch(applyChatEvent(event)); + + expect(selectBalance({ coins: store.getState().coins })).toBe(999999); + }); + + it("should not update balance if metering_balance is missing", () => { + const store = createTestStore(); + + // Set initial balance + const event1: ChatEventEnvelope = { + chat_id: "test-chat", + seq: "1", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "merge_extra", extra: { metering_balance: 5000 } }], + }; + store.dispatch(applyChatEvent(event1)); + + // Event without metering_balance should not change it + const event2: ChatEventEnvelope = { + chat_id: "test-chat", + seq: "2", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "append_content", text: "More text" }], + }; + store.dispatch(applyChatEvent(event2)); + + expect(selectBalance({ coins: store.getState().coins })).toBe(5000); + }); + + it("should update balance with latest value from multiple ops", () => { + const store = createTestStore(); + + const event: ChatEventEnvelope = { + chat_id: "test-chat", + seq: "1", + type: "stream_delta", + message_id: "msg-1", + ops: [ + { op: "merge_extra", extra: { 
metering_balance: 1000 } },
+ { op: "merge_extra", extra: { other_field: "value" } },
+ ],
+ };
+ store.dispatch(applyChatEvent(event));
+
+ // First merge_extra with metering_balance wins
+ expect(selectBalance({ coins: store.getState().coins })).toBe(1000);
+ });
+
+ it("should handle merge_extra with other metering fields but no balance", () => {
+ const store = createTestStore();
+
+ // Set initial balance
+ const event1: ChatEventEnvelope = {
+ chat_id: "test-chat",
+ seq: "1",
+ type: "stream_delta",
+ message_id: "msg-1",
+ ops: [{ op: "merge_extra", extra: { metering_balance: 5000 } }],
+ };
+ store.dispatch(applyChatEvent(event1));
+
+ // Event with other metering fields but no balance
+ const event2: ChatEventEnvelope = {
+ chat_id: "test-chat",
+ seq: "2",
+ type: "stream_delta",
+ message_id: "msg-1",
+ ops: [
+ {
+ op: "merge_extra",
+ extra: {
+ metering_prompt_tokens_n: 100,
+ metering_generated_tokens_n: 50,
+ },
+ },
+ ],
+ };
+ store.dispatch(applyChatEvent(event2));
+
+ // Balance should remain unchanged
+ expect(selectBalance({ coins: store.getState().coins })).toBe(5000);
+ });
+ });
+});
diff --git a/refact-agent/gui/src/__tests__/ensureSubscriptionConnected.test.tsx b/refact-agent/gui/src/__tests__/ensureSubscriptionConnected.test.tsx new file mode 100644 index 000000000..8347492b3 --- /dev/null +++ b/refact-agent/gui/src/__tests__/ensureSubscriptionConnected.test.tsx @@ -0,0 +1,165 @@
+import { describe, it, expect, vi, beforeEach } from "vitest";
+import { renderHook } from "@testing-library/react";
+import { Provider } from "react-redux";
+import { configureStore } from "@reduxjs/toolkit";
+import { useEnsureSubscriptionConnected } from "../hooks/useEnsureSubscriptionConnected";
+import { chatReducer } from "../features/Chat/Thread/reducer";
+import { reducer as configReducer } from "../features/Config/configSlice";
+import type { Chat, ChatThreadRuntime } from "../features/Chat/Thread/types";
+import React from "react";
+
+const createThreadRuntime = (
+ overrides: Partial<ChatThreadRuntime> & {
+ thread: ChatThreadRuntime["thread"];
+ },
+): ChatThreadRuntime => ({
+ streaming: false,
+ waiting_for_response: false,
+ snapshot_received: false,
+ prevent_send: false,
+ error: null,
+ queued_items: [],
+ send_immediately: false,
+ attached_images: [],
+ attached_text_files: [],
+ confirmation: {
+ pause: false,
+ pause_reasons: [],
+ status: { wasInteracted: false, confirmationStatus: true },
+ },
+ task_widget_expanded: false,
+ ...overrides,
+});
+
+const chatInitialState: Chat = {
+ current_thread_id: "",
+ open_thread_ids: [],
+ threads: {},
+ system_prompt: {},
+ tool_use: "agent",
+ sse_refresh_requested: null,
+ stream_version: 0,
+};
+
+const createTestStore = (chatOverrides?: Partial<Chat>) =>
+ configureStore({
+ reducer: {
+ chat: chatReducer,
+ config: configReducer,
+ },
+ preloadedState: chatOverrides
+ ?
{ chat: { ...chatInitialState, ...chatOverrides } }
+ : undefined,
+ });
+
+describe("useEnsureSubscriptionConnected", () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ });
+
+ it("returns isConnected true if snapshot already received", () => {
+ const store = createTestStore({
+ current_thread_id: "test-chat-id",
+ open_thread_ids: ["test-chat-id"],
+ threads: {
+ "test-chat-id": createThreadRuntime({
+ thread: {
+ id: "test-chat-id",
+ messages: [],
+ model: "",
+ title: undefined,
+ new_chat_suggested: { wasSuggested: false },
+ },
+ snapshot_received: true,
+ }),
+ },
+ });
+
+ const wrapper = ({ children }: { children: React.ReactNode }) => (
+ <Provider store={store}>{children}</Provider>
+ );
+
+ const { result } = renderHook(
+ () => useEnsureSubscriptionConnected("test-chat-id"),
+ { wrapper },
+ );
+
+ expect(result.current.isConnected).toBe(true);
+ expect(result.current.isConnecting).toBe(false);
+ });
+
+ it("provides ensureConnected function", () => {
+ const store = createTestStore({
+ current_thread_id: "test-chat-id",
+ open_thread_ids: ["test-chat-id"],
+ threads: {
+ "test-chat-id": createThreadRuntime({
+ thread: {
+ id: "test-chat-id",
+ messages: [{ role: "user", content: "hello" }],
+ model: "",
+ title: undefined,
+ new_chat_suggested: { wasSuggested: false },
+ },
+ snapshot_received: false,
+ }),
+ },
+ });
+
+ const wrapper = ({ children }: { children: React.ReactNode }) => (
+ <Provider store={store}>{children}</Provider>
+ );
+
+ const { result } = renderHook(
+ () => useEnsureSubscriptionConnected("test-chat-id"),
+ { wrapper },
+ );
+
+ expect(typeof result.current.ensureConnected).toBe("function");
+ });
+
+ it("returns isConnected false when no snapshot received", () => {
+ const store = createTestStore({
+ current_thread_id: "test-chat-id",
+ open_thread_ids: ["test-chat-id"],
+ threads: {
+ "test-chat-id": createThreadRuntime({
+ thread: {
+ id: "test-chat-id",
+ messages: [],
+ model: "",
+ title: undefined,
+ new_chat_suggested: { wasSuggested: false },
+ },
+ snapshot_received: false,
+ }),
+ },
+ });
+
+ const wrapper = ({ children }: { children: React.ReactNode }) => (
+ <Provider store={store}>{children}</Provider>
+ );
+
+ const { result } = renderHook(
+ () => useEnsureSubscriptionConnected("test-chat-id"),
+ { wrapper },
+ );
+
+ expect(result.current.isConnected).toBe(false);
+ expect(result.current.isConnecting).toBe(true);
+ });
+
+ it("returns isConnected true when chatId is null", () => {
+ const store = createTestStore();
+
+ const wrapper = ({ children }: { children: React.ReactNode }) => (
+ <Provider store={store}>{children}</Provider>
+ );
+
+ const { result } = renderHook(() => useEnsureSubscriptionConnected(null), {
+ wrapper,
+ });
+
+ expect(result.current.isConnected).toBe(true);
+ });
+});
diff --git a/refact-agent/gui/src/__tests__/historyTree.test.ts b/refact-agent/gui/src/__tests__/historyTree.test.ts new file mode 100644 index 000000000..ab2accaea --- /dev/null +++ b/refact-agent/gui/src/__tests__/historyTree.test.ts @@ -0,0 +1,273 @@
+import { describe, it, expect } from "vitest";
+import {
+ buildHistoryTree,
+ type ChatHistoryItem,
+} from "../features/History/historySlice";
+
+const createItem = (
+ id: string,
+ overrides: Partial<ChatHistoryItem> = {},
+): ChatHistoryItem => ({
+ id,
+ title: `Chat ${id}`,
+ model: "gpt-4",
+ mode: "AGENT",
+ tool_use: "agent",
+ messages: [],
+ boost_reasoning: false,
+ context_tokens_cap: undefined,
+ include_project_info: true,
+ increase_max_tokens: false,
+
+ project_name: undefined,
+ isTitleGenerated: true,
+ createdAt: "2024-01-01T00:00:00Z",
+ last_user_message_id: "",
+ updatedAt: overrides.updatedAt ??
"2024-01-01T00:00:00Z", + ...overrides, +}); + +describe("buildHistoryTree", () => { + describe("basic tree building", () => { + it("should return empty array for empty input", () => { + const result = buildHistoryTree({}); + expect(result).toEqual([]); + }); + + it("should return single root for single item", () => { + const chats = { a: createItem("a") }; + const result = buildHistoryTree(chats); + expect(result).toHaveLength(1); + expect(result[0].id).toBe("a"); + expect(result[0].children).toHaveLength(0); + }); + + it("should return multiple roots for unrelated items", () => { + const chats = { + a: createItem("a", { updatedAt: "2024-01-03T00:00:00Z" }), + b: createItem("b", { updatedAt: "2024-01-02T00:00:00Z" }), + c: createItem("c", { updatedAt: "2024-01-01T00:00:00Z" }), + }; + const result = buildHistoryTree(chats); + expect(result).toHaveLength(3); + expect(result[0].id).toBe("a"); + expect(result[1].id).toBe("b"); + expect(result[2].id).toBe("c"); + }); + }); + + describe("handoff chains", () => { + it("should handle single handoff (A -> B)", () => { + const chats = { + a: createItem("a", { updatedAt: "2024-01-01T00:00:00Z" }), + b: createItem("b", { + parent_id: "a", + link_type: "handoff", + updatedAt: "2024-01-02T00:00:00Z", + }), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("b"); + expect(result[0].children).toHaveLength(1); + expect(result[0].children[0].id).toBe("a"); + }); + + it("should handle double handoff chain (A -> B -> C)", () => { + const chats = { + a: createItem("a", { updatedAt: "2024-01-01T00:00:00Z" }), + b: createItem("b", { + parent_id: "a", + link_type: "handoff", + updatedAt: "2024-01-02T00:00:00Z", + }), + c: createItem("c", { + parent_id: "b", + link_type: "handoff", + updatedAt: "2024-01-03T00:00:00Z", + }), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("c"); + expect(result[0].children).toHaveLength(1); + expect(result[0].children[0].id).toBe("b"); + expect(result[0].children[0].children).toHaveLength(1); + expect(result[0].children[0].children[0].id).toBe("a"); + }); + + it("should handle triple handoff chain (A -> B -> C -> D)", () => { + const chats = { + a: createItem("a", { updatedAt: "2024-01-01T00:00:00Z" }), + b: createItem("b", { + parent_id: "a", + link_type: "handoff", + updatedAt: "2024-01-02T00:00:00Z", + }), + c: createItem("c", { + parent_id: "b", + link_type: "handoff", + updatedAt: "2024-01-03T00:00:00Z", + }), + d: createItem("d", { + parent_id: "c", + link_type: "handoff", + updatedAt: "2024-01-04T00:00:00Z", + }), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("d"); + expect(result[0].children[0].id).toBe("c"); + expect(result[0].children[0].children[0].id).toBe("b"); + expect(result[0].children[0].children[0].children[0].id).toBe("a"); + }); + }); + + describe("subagent links", () => { + it("should handle subagent as child of parent", () => { + const chats = { + a: createItem("a", { updatedAt: "2024-01-01T00:00:00Z" }), + b: createItem("b", { + parent_id: "a", + link_type: "subagent", + updatedAt: "2024-01-02T00:00:00Z", + }), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("a"); + expect(result[0].children).toHaveLength(1); + expect(result[0].children[0].id).toBe("b"); + }); + }); + + describe("mixed links", () => { + it("should handle handoff with subagent child", () => { + 
const chats = { + a: createItem("a", { updatedAt: "2024-01-01T00:00:00Z" }), + b: createItem("b", { + parent_id: "a", + link_type: "handoff", + updatedAt: "2024-01-02T00:00:00Z", + }), + c: createItem("c", { + parent_id: "b", + link_type: "subagent", + updatedAt: "2024-01-03T00:00:00Z", + }), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("b"); + expect(result[0].children).toHaveLength(2); + const childIds = result[0].children.map((c) => c.id).sort(); + expect(childIds).toEqual(["a", "c"]); + }); + }); + + describe("cycle prevention", () => { + it("should not create cycles with self-reference", () => { + const chats = { + a: createItem("a", { parent_id: "a", link_type: "handoff" }), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("a"); + expect(result[0].children).toHaveLength(0); + }); + + it("should not create cycles with mutual reference", () => { + const chats = { + a: createItem("a", { + parent_id: "b", + link_type: "handoff", + updatedAt: "2024-01-02T00:00:00Z", + }), + b: createItem("b", { + parent_id: "a", + link_type: "handoff", + updatedAt: "2024-01-01T00:00:00Z", + }), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(1); + expect(result[0].children).toHaveLength(1); + expect(result[0].children[0].children).toHaveLength(0); + }); + }); + + describe("task filtering", () => { + it("should exclude items with task_id", () => { + const chats = { + a: createItem("a"), + b: createItem("b", { task_id: "task-1" }), + c: createItem("c"), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(2); + expect(result.map((r) => r.id).sort()).toEqual(["a", "c"]); + }); + }); + + describe("sorting", () => { + it("should sort roots by updatedAt descending", () => { + const chats = { + a: createItem("a", { updatedAt: "2024-01-01T00:00:00Z" }), + b: createItem("b", { updatedAt: "2024-01-03T00:00:00Z" }), + c: createItem("c", { updatedAt: "2024-01-02T00:00:00Z" }), + }; + const result = buildHistoryTree(chats); + + expect(result[0].id).toBe("b"); + expect(result[1].id).toBe("c"); + expect(result[2].id).toBe("a"); + }); + + it("should sort children by updatedAt descending", () => { + const chats = { + a: createItem("a", { updatedAt: "2024-01-04T00:00:00Z" }), + b: createItem("b", { + parent_id: "a", + link_type: "subagent", + updatedAt: "2024-01-01T00:00:00Z", + }), + c: createItem("c", { + parent_id: "a", + link_type: "subagent", + updatedAt: "2024-01-03T00:00:00Z", + }), + d: createItem("d", { + parent_id: "a", + link_type: "subagent", + updatedAt: "2024-01-02T00:00:00Z", + }), + }; + const result = buildHistoryTree(chats); + + expect(result[0].children[0].id).toBe("c"); + expect(result[0].children[1].id).toBe("d"); + expect(result[0].children[2].id).toBe("b"); + }); + }); + + describe("missing parent handling", () => { + it("should treat item as root if parent_id not found", () => { + const chats = { + a: createItem("a", { parent_id: "nonexistent", link_type: "handoff" }), + }; + const result = buildHistoryTree(chats); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("a"); + }); + }); +}); diff --git a/refact-agent/gui/src/__tests__/integration/DeleteChat.test.tsx b/refact-agent/gui/src/__tests__/integration/DeleteChat.test.tsx new file mode 100644 index 000000000..e035020ed --- /dev/null +++ b/refact-agent/gui/src/__tests__/integration/DeleteChat.test.tsx @@ -0,0 +1,100 @@ +import { render, waitFor } from 
"../../utils/test-utils"; +import { describe, expect, it } from "vitest"; +import { + server, + goodUser, + goodPing, + chatLinks, + telemetryChat, + telemetryNetwork, + goodCaps, + emptyTrajectories, + trajectorySave, + trajectoryDelete, + chatSessionSubscribe, + chatSessionCommand, + chatSessionAbort, + sidebarSubscribe, + emptyTasks, +} from "../../utils/mockServer"; +import { InnerApp } from "../../features/App"; +import { HistoryState } from "../../features/History/historySlice"; + +describe("Delete a Chat form history", () => { + it("can delete a chat", async () => { + server.use( + goodUser, + goodPing, + chatLinks, + telemetryChat, + telemetryNetwork, + goodCaps, + emptyTrajectories, + trajectorySave, + trajectoryDelete, + chatSessionSubscribe, + chatSessionCommand, + chatSessionAbort, + sidebarSubscribe, + emptyTasks, + ); + const now = new Date().toISOString(); + const history: HistoryState = { + chats: { + abc123: { + title: "Test title", + isTitleGenerated: false, + messages: [], + id: "abc123", + model: "foo", + tool_use: "quick", + new_chat_suggested: { + wasSuggested: false, + }, + createdAt: now, + updatedAt: now, + }, + }, + isLoading: false, + loadError: null, + pagination: { cursor: null, hasMore: false }, + }; + const { user, store, ...app } = render(, { + preloadedState: { + history, + teams: { + group: { id: "123", name: "test" }, + }, + pages: [{ name: "history" }], + config: { + apiKey: "test", + lspPort: 8001, + themeProps: {}, + host: "vscode", + addressURL: "Refact", + }, + }, + }); + + const itemTitleToDelete = "Test title"; + + const restoreButtonText = await app.findByText(itemTitleToDelete); + + // Find the delete button - in compact view, it uses aria-label="Delete" + let container = restoreButtonText.parentElement; + while (container && !container.querySelector('[aria-label="Delete"]')) { + container = container.parentElement; + } + const deleteButton = container?.querySelector('[aria-label="Delete"]'); + + expect(deleteButton).not.toBeNull(); + + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + await user.click(deleteButton!); + + // Wait for the deletion to be processed + await waitFor(() => { + expect(store.getState().history.chats).toEqual({}); + }); + }); +}); diff --git a/refact-agent/gui/src/__tests__/UserSurvey.test.tsx b/refact-agent/gui/src/__tests__/integration/UserSurvey.test.tsx similarity index 84% rename from refact-agent/gui/src/__tests__/UserSurvey.test.tsx rename to refact-agent/gui/src/__tests__/integration/UserSurvey.test.tsx index 86f48f919..2bea5b56c 100644 --- a/refact-agent/gui/src/__tests__/UserSurvey.test.tsx +++ b/refact-agent/gui/src/__tests__/integration/UserSurvey.test.tsx @@ -1,6 +1,6 @@ import { http, HttpResponse } from "msw"; -import { QUESTIONS_STUB } from "../__fixtures__"; -import { render } from "../utils/test-utils"; +import { QUESTIONS_STUB } from "../../__fixtures__"; +import { render } from "../../utils/test-utils"; import { describe, expect, test } from "vitest"; import { server, @@ -14,8 +14,13 @@ import { chatLinks, telemetryChat, telemetryNetwork, -} from "../utils/mockServer"; -import { InnerApp } from "../features/App"; + emptyTrajectories, + trajectorySave, + chatSessionSubscribe, + chatSessionCommand, + chatSessionAbort, +} from "../../utils/mockServer"; +import { InnerApp } from "../../features/App"; const userMock = http.get( "https://www.smallcloud.ai/v1/login", @@ -66,6 +71,11 @@ describe("Start a new chat", () => { chatLinks, telemetryChat, telemetryNetwork, + emptyTrajectories, + 
trajectorySave,
+ chatSessionSubscribe,
+ chatSessionCommand,
+ chatSessionAbort,
); const { user, ...app } = render(<InnerApp />, {
diff --git a/refact-agent/gui/src/__tests__/integration/chatSubscription.integration.test.ts b/refact-agent/gui/src/__tests__/integration/chatSubscription.integration.test.ts new file mode 100644 index 000000000..ea0e4d0dd --- /dev/null +++ b/refact-agent/gui/src/__tests__/integration/chatSubscription.integration.test.ts @@ -0,0 +1,454 @@
+/**
+ * Chat Subscription Integration Tests
+ *
+ * Integration tests that use the actual refact-lsp server.
+ * Requires: refact-lsp running on port 8001
+ *
+ * Run with: npm run test:no-watch -- chatSubscription.integration
+ *
+ * Note: These tests are skipped in CI if no server is available.
+ */
+
+/* eslint-disable @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-assignment */
+import { describe, it, expect, vi } from "vitest";
+
+// Increase test timeout for integration tests
+vi.setConfig({ testTimeout: 30000 });
+import {
+ sendChatCommand,
+ sendUserMessage,
+ updateChatParams,
+ abortGeneration,
+} from "../../services/refact/chatCommands";
+
+const LSP_PORT = 8001;
+const LSP_URL = `http://127.0.0.1:${LSP_PORT}`;
+
+// Check if server is available
+async function isServerAvailable(): Promise<boolean> {
+ try {
+ const response = await fetch(`${LSP_URL}/v1/ping`, {
+ signal: AbortSignal.timeout(2000),
+ });
+ return response.ok;
+ } catch {
+ return false;
+ }
+}
+
+// Generate unique chat ID
+function generateChatId(prefix: string): string {
+ return `${prefix}-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
+}
+
+async function withRetry<T>(
+ operation: () => Promise<T>,
+ retries = 3,
+ delayMs = 250,
+): Promise<T> {
+ let lastError: unknown;
+
+ for (let attempt = 0; attempt < retries; attempt++) {
+ try {
+ return await operation();
+ } catch (error) {
+ lastError = error;
+ const message =
+ error instanceof Error ? error.message : String(error ?? "");
+ const isConnectionIssue = /ECONNREFUSED|fetch failed|NetworkError/i.test(
+ message,
+ );
+ if (!isConnectionIssue || attempt === retries - 1) {
+ throw error;
+ }
+ await new Promise((resolve) =>
+ setTimeout(resolve, delayMs * (attempt + 1)),
+ );
+ }
+ }
+
+ throw lastError;
+}
+
+// Collect events from SSE stream
+async function collectEvents(
+ chatId: string,
+ {
+ maxEvents,
+ timeoutMs,
+ stopWhen,
+ }: {
+ maxEvents: number;
+ timeoutMs: number;
+ stopWhen?: (event: unknown, events: unknown[]) => boolean;
+ },
+): Promise<unknown[]> {
+ const events: unknown[] = [];
+
+ return new Promise((resolve) => {
+ const controller = new AbortController();
+ let settled = false;
+ const finish = () => {
+ if (settled) {
+ return;
+ }
+ settled = true;
+ clearTimeout(timeout);
+ controller.abort();
+ resolve(events);
+ };
+ const timeout = setTimeout(() => {
+ finish();
+ }, timeoutMs);
+
+ fetch(`${LSP_URL}/v1/chats/subscribe?chat_id=${chatId}`, {
+ signal: controller.signal,
+ })
+ .then(async (response) => {
+ if (!response.ok) {
+ finish();
+ return;
+ }
+
+ const reader = response.body?.getReader();
+ if (!reader) {
+ finish();
+ return;
+ }
+
+ const decoder = new TextDecoder();
+ let buffer = "";
+
+ while (!settled && events.length < maxEvents) {
+ const { done, value } = await reader.read();
+ if (done) break;
+
+ buffer += decoder.decode(value, { stream: true });
+ const blocks = buffer.split("\n\n");
+ buffer = blocks.pop() ??
""; + + for (const block of blocks) { + const dataLines = block + .split("\n") + .filter((line) => line.startsWith("data:")) + .map((line) => line.slice(5).trimStart()); + + if (dataLines.length === 0) { + continue; + } + + const payload = dataLines.join("\n"); + if (payload === "[DONE]") { + continue; + } + + try { + const event = JSON.parse(payload); + events.push(event); + + if ( + (stopWhen?.(event, events) ?? false) || + events.length >= maxEvents + ) { + finish(); + return; + } + } catch { + // Ignore parse errors + } + } + } + + finish(); + }) + .catch(() => { + finish(); + }); + }); +} + +describe.skipIf(!(await isServerAvailable()))( + "Chat Subscription Integration Tests", + () => { + describe("sendChatCommand", () => { + it("should accept abort command", async () => { + const chatId = generateChatId("test-abort"); + + await expect( + sendChatCommand(chatId, LSP_PORT, undefined, { + type: "abort" as const, + }), + ).resolves.toBeUndefined(); + }); + + it("should accept set_params command", async () => { + const chatId = generateChatId("test-params"); + + await expect( + updateChatParams( + chatId, + { model: "refact/gpt-4.1-nano", mode: "NO_TOOLS" }, + LSP_PORT, + ), + ).resolves.toBeUndefined(); + }); + + it("should accept user_message command", async () => { + const chatId = generateChatId("test-message"); + + await updateChatParams( + chatId, + { model: "refact/gpt-4.1-nano", mode: "NO_TOOLS" }, + LSP_PORT, + ); + + await expect( + sendUserMessage(chatId, "Hello, test!", LSP_PORT), + ).resolves.toBeUndefined(); + }); + + it("should detect duplicate commands", async () => { + const chatId = generateChatId("test-duplicate"); + const requestId = `test-${Date.now()}`; + + // First request + const response1 = await fetch( + `${LSP_URL}/v1/chats/${chatId}/commands`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + client_request_id: requestId, + type: "set_params", + patch: { model: "test" }, + }), + }, + ); + + expect(response1.status).toBe(200); + + // Second request with same ID should be detected as duplicate + const response2 = await fetch( + `${LSP_URL}/v1/chats/${chatId}/commands`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + client_request_id: requestId, + type: "set_params", + patch: { model: "test" }, + }), + }, + ); + + expect(response2.status).toBe(200); + const data = await response2.json(); + // Backend may return duplicate status or just accept it idempotently + expect( + ["duplicate", "ok", "queued"].includes(data.status as string), + ).toBe(true); + }); + }); + + describe("SSE Subscription", () => { + it("should receive snapshot on connect", async () => { + const chatId = generateChatId("test-snapshot"); + + const events = await collectEvents(chatId, { + maxEvents: 1, + timeoutMs: 5000, + }); + + expect(events.length).toBeGreaterThanOrEqual(1); + expect(events[0]).toHaveProperty("type", "snapshot"); + expect(events[0]).toHaveProperty("chat_id", chatId); + expect(events[0]).toHaveProperty("thread"); + expect(events[0]).toHaveProperty("runtime"); + expect(events[0]).toHaveProperty("messages"); + }); + + it("should receive events after sending command", async () => { + const chatId = generateChatId("test-events"); + + // Start collecting events + const eventsPromise = collectEvents(chatId, { + maxEvents: 10, + timeoutMs: 10000, + }); + + // Wait a bit for subscription to establish + await new Promise((r) => setTimeout(r, 300)); + + // Send commands + await 
withRetry(() => + updateChatParams( + chatId, + { model: "refact/gpt-4.1-nano", mode: "NO_TOOLS" }, + LSP_PORT, + ), + ); + + await withRetry(() => sendUserMessage(chatId, "Say hi", LSP_PORT)); + + const events = await eventsPromise; + + // Check we got expected events + const eventTypes = events.map( + (e: unknown) => (e as { type: string }).type, + ); + + expect(eventTypes).toContain("snapshot"); + expect(eventTypes).toContain("ack"); // Command acknowledgments + }); + + it("should receive stream events during generation", async () => { + const chatId = generateChatId("test-stream"); + + // Start collecting events + const eventsPromise = collectEvents(chatId, { + maxEvents: 20, + timeoutMs: 15000, + }); + + await new Promise((r) => setTimeout(r, 300)); + + // Set up chat and send message + await withRetry(() => + updateChatParams( + chatId, + { model: "refact/gpt-4.1-nano", mode: "NO_TOOLS" }, + LSP_PORT, + ), + ); + + await withRetry(() => sendUserMessage(chatId, "Say hello", LSP_PORT)); + + const events = await eventsPromise; + const eventTypes = events.map( + (e: unknown) => (e as { type: string }).type, + ); + + // Should have streaming events + expect(eventTypes).toContain("snapshot"); + expect(eventTypes).toContain("message_added"); // User message + expect(eventTypes).toContain("stream_started"); + + // May have stream_delta and stream_finished depending on timing + // Debug: eventTypes contains the received event types + }); + }); + + describe("Abort Functionality", () => { + it("should abort generation and receive message_removed", async () => { + const chatId = generateChatId("test-abort-stream"); + + // Start collecting events + const eventsPromise = collectEvents(chatId, { + maxEvents: 1000, + timeoutMs: 30000, + stopWhen: (event: unknown) => { + const type = (event as { type?: string }).type; + return type === "message_removed" || type === "stream_finished"; + }, + }); + + await new Promise((r) => setTimeout(r, 300)); + + // Set up chat with a long prompt + await withRetry(() => + updateChatParams( + chatId, + { model: "refact/gpt-4.1-nano", mode: "NO_TOOLS" }, + LSP_PORT, + ), + ); + + await withRetry(() => + sendUserMessage( + chatId, + "Write a long essay about programming", + LSP_PORT, + ), + ); + + // Wait briefly for generation to start, then abort. 
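For context on the wire format this abort flow relies on: `abortGeneration` from `chatCommands` is assumed to wrap the same raw commands endpoint exercised in the duplicate-command test above. A minimal sketch under that assumption, where `abortViaRawCommand` and the `client_request_id` value are illustrative only and `LSP_URL` is the constant defined at the top of this test file:

```ts
// Hedged sketch, not the actual abortGeneration implementation: it posts an
// "abort" command to the same POST /v1/chats/{chat_id}/commands endpoint
// used by the duplicate-command test in this file.
async function abortViaRawCommand(chatId: string): Promise<void> {
  const response = await fetch(`${LSP_URL}/v1/chats/${chatId}/commands`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      client_request_id: `abort-${Date.now()}`, // any unique string; illustrative
      type: "abort",
    }),
  });
  if (!response.ok) {
    throw new Error(`abort command failed with status ${response.status}`);
  }
}
```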
+ await new Promise((r) => setTimeout(r, 200)); + + // Send abort + await withRetry(() => abortGeneration(chatId, LSP_PORT)); + + const events = await eventsPromise; + const eventTypes = events.map( + (e: unknown) => (e as { type: string }).type, + ); + + // Debug: eventTypes contains abort test events + + // Should have stream_started and either message_removed (abort) or stream_finished (too late) + expect(eventTypes).toContain("stream_started"); + expect( + eventTypes.includes("message_removed") || + eventTypes.includes("stream_finished"), + ).toBe(true); + }); + }); + + describe("Multiple Chats", () => { + it("should handle multiple independent chats", async () => { + const chatId1 = generateChatId("test-multi-1"); + const chatId2 = generateChatId("test-multi-2"); + + // Connect to both chats + const events1Promise = collectEvents(chatId1, { + maxEvents: 5, + timeoutMs: 8000, + }); + const events2Promise = collectEvents(chatId2, { + maxEvents: 5, + timeoutMs: 8000, + }); + + await new Promise((r) => setTimeout(r, 300)); + + // Send different messages to each + await withRetry(() => + updateChatParams( + chatId1, + { model: "refact/gpt-4.1-nano", mode: "NO_TOOLS" }, + LSP_PORT, + ), + ); + await withRetry(() => + updateChatParams( + chatId2, + { model: "refact/gpt-4.1-nano", mode: "NO_TOOLS" }, + LSP_PORT, + ), + ); + + await withRetry(() => + sendUserMessage(chatId1, "Chat 1 message", LSP_PORT), + ); + await withRetry(() => + sendUserMessage(chatId2, "Chat 2 message", LSP_PORT), + ); + + const [events1, events2] = await Promise.all([ + events1Promise, + events2Promise, + ]); + + // Each should only have events for its own chat + const chat1Ids = events1.map( + (e: unknown) => (e as { chat_id: string }).chat_id, + ); + const chat2Ids = events2.map( + (e: unknown) => (e as { chat_id: string }).chat_id, + ); + + expect(chat1Ids.every((id: string) => id === chatId1)).toBe(true); + expect(chat2Ids.every((id: string) => id === chatId2)).toBe(true); + }); + }); + }, +); diff --git a/refact-agent/gui/src/__tests__/performanceFixes.test.ts b/refact-agent/gui/src/__tests__/performanceFixes.test.ts new file mode 100644 index 000000000..ba8d606e8 --- /dev/null +++ b/refact-agent/gui/src/__tests__/performanceFixes.test.ts @@ -0,0 +1,146 @@ +import { describe, it, expect } from "vitest"; +import { applyDeltaOps, DeltaOp } from "../services/refact/chatSubscription"; +import type { ChatMessage } from "../services/refact/types"; +import { selectToolResultById } from "../features/Chat/Thread/selectors"; +import type { RootState } from "../app/store"; + +describe("applyDeltaOps", () => { + it("appends content correctly across multiple deltas", () => { + const initial: ChatMessage = { role: "assistant", content: "" }; + const ops1: DeltaOp[] = [{ op: "append_content", text: "Hello" }]; + const ops2: DeltaOp[] = [{ op: "append_content", text: " World" }]; + + const after1 = applyDeltaOps(initial, ops1); + const after2 = applyDeltaOps(after1, ops2); + + expect(after1.content).toBe("Hello"); + expect(after2.content).toBe("Hello World"); + }); + + it("appends reasoning correctly", () => { + const initial: ChatMessage = { role: "assistant", content: "" }; + const ops: DeltaOp[] = [ + { op: "append_reasoning", text: "Thinking..." }, + { op: "append_reasoning", text: " More thoughts." }, + ]; + + const result = applyDeltaOps(initial, ops); + expect((result as { reasoning_content?: string }).reasoning_content).toBe( + "Thinking... 
More thoughts.", + ); + }); + + it("add_citation does not mutate prior message objects", () => { + const initial: ChatMessage = { role: "assistant", content: "test" }; + const citation1 = { url: "http://example.com/1", title: "Example 1" }; + const citation2 = { url: "http://example.com/2", title: "Example 2" }; + + const after1 = applyDeltaOps(initial, [ + { op: "add_citation", citation: citation1 }, + ]); + const after2 = applyDeltaOps(after1, [ + { op: "add_citation", citation: citation2 }, + ]); + + const initialCitations = (initial as { citations?: unknown[] }).citations; + const after1Citations = (after1 as { citations?: unknown[] }).citations; + const after2Citations = (after2 as { citations?: unknown[] }).citations; + + expect(initialCitations).toBeUndefined(); + expect(after1Citations).toHaveLength(1); + expect(after2Citations).toHaveLength(2); + expect(after1Citations).not.toBe(after2Citations); + }); + + it("handles set_tool_calls", () => { + const initial: ChatMessage = { role: "assistant", content: "" }; + const toolCalls = [ + { id: "1", function: { name: "test", arguments: "{}" } }, + ]; + const ops: DeltaOp[] = [{ op: "set_tool_calls", tool_calls: toolCalls }]; + + const result = applyDeltaOps(initial, ops); + expect((result as { tool_calls?: unknown[] }).tool_calls).toEqual( + toolCalls, + ); + }); + + it("handles set_thinking_blocks", () => { + const initial: ChatMessage = { role: "assistant", content: "" }; + const blocks = [{ thinking: "test thought" }]; + const ops: DeltaOp[] = [{ op: "set_thinking_blocks", blocks }]; + + const result = applyDeltaOps(initial, ops); + expect((result as { thinking_blocks?: unknown[] }).thinking_blocks).toEqual( + blocks, + ); + }); + + it("handles set_usage", () => { + const initial: ChatMessage = { role: "assistant", content: "" }; + const usage = { prompt_tokens: 100, completion_tokens: 50 }; + const ops: DeltaOp[] = [{ op: "set_usage", usage }]; + + const result = applyDeltaOps(initial, ops); + expect((result as { usage?: unknown }).usage).toEqual(usage); + }); + + it("handles merge_extra", () => { + const initial: ChatMessage = { + role: "assistant", + content: "", + extra: { a: 1 }, + } as ChatMessage & { extra: Record }; + const ops: DeltaOp[] = [{ op: "merge_extra", extra: { b: 2 } }]; + + const result = applyDeltaOps(initial, ops); + expect((result as { extra?: Record }).extra).toEqual({ + a: 1, + b: 2, + }); + }); +}); + +describe("selectToolResultById optimization", () => { + it("finds tool result from end without array copy", () => { + const mockState = { + chat: { + current_thread_id: "test", + threads: { + test: { + thread: { + messages: [ + { role: "tool", tool_call_id: "id1", content: "first" }, + { role: "tool", tool_call_id: "id2", content: "second" }, + { role: "tool", tool_call_id: "id1", content: "third" }, + ], + }, + }, + }, + }, + } as unknown as RootState; + + const result = selectToolResultById(mockState, "id1"); + expect(result?.content).toBe("third"); + }); + + it("returns undefined for missing id", () => { + const mockState = { + chat: { + current_thread_id: "test", + threads: { + test: { + thread: { + messages: [ + { role: "tool", tool_call_id: "id1", content: "first" }, + ], + }, + }, + }, + }, + } as unknown as RootState; + + const result = selectToolResultById(mockState, "nonexistent"); + expect(result).toBeUndefined(); + }); +}); diff --git a/refact-agent/gui/src/__tests__/streamingTokenCounter.test.tsx b/refact-agent/gui/src/__tests__/streamingTokenCounter.test.tsx new file mode 100644 index 
000000000..67f2fbf78 --- /dev/null +++ b/refact-agent/gui/src/__tests__/streamingTokenCounter.test.tsx @@ -0,0 +1,26 @@ +import { describe, it, expect } from "vitest"; +import * as fs from "fs"; +import * as path from "path"; + +describe("StreamingTokenCounter", () => { + it("has cleanup return in visibility useEffect", () => { + const filePath = path.resolve( + __dirname, + "../components/UsageCounter/StreamingTokenCounter.tsx", + ); + const content = fs.readFileSync(filePath, "utf-8"); + + expect(content).toContain("return () => {"); + expect(content).toContain("window.clearTimeout(hideTimerRef.current)"); + }); + + it("uses formatNumberToFixed for token display", () => { + const filePath = path.resolve( + __dirname, + "../components/UsageCounter/StreamingTokenCounter.tsx", + ); + const content = fs.readFileSync(filePath, "utf-8"); + + expect(content).toContain("formatNumberToFixed"); + }); +}); diff --git a/refact-agent/gui/src/__tests__/taskDerivation.test.ts b/refact-agent/gui/src/__tests__/taskDerivation.test.ts new file mode 100644 index 000000000..33de2f72b --- /dev/null +++ b/refact-agent/gui/src/__tests__/taskDerivation.test.ts @@ -0,0 +1,661 @@ +import { describe, it, expect } from "vitest"; +import type { ChatMessages } from "../services/refact/types"; +import type { TodoItem } from "../features/Chat/Thread/types"; + +const normalizeTaskStatus = (status: unknown): TodoItem["status"] | null => { + if (typeof status !== "string") return null; + switch (status.toLowerCase()) { + case "pending": + return "pending"; + case "in_progress": + case "in-progress": + case "inprogress": + return "in_progress"; + case "completed": + case "done": + case "complete": + return "completed"; + case "failed": + case "error": + return "failed"; + default: + return null; + } +}; + +const sanitizeText = (text: string, maxLen: number): string => { + return ( + text + // eslint-disable-next-line no-control-regex + .replace(/[\x00-\x1F\x7F]/g, "") + .trim() + .slice(0, maxLen) + ); +}; + +const parseTasksFromArgs = (argsStr: string): TodoItem[] | null => { + try { + const args = JSON.parse(argsStr) as unknown; + if (!args || typeof args !== "object") return null; + const tasksArray = (args as Record).tasks; + if (!Array.isArray(tasksArray)) return null; + + if (tasksArray.length === 0) return []; + + const result: TodoItem[] = []; + const seenIds = new Set(); + + for (const item of tasksArray) { + if (!item || typeof item !== "object") continue; + const t = item as Record; + + const rawId = + typeof t.id === "string" + ? t.id + : typeof t.id === "number" + ? String(t.id) + : null; + if (!rawId) continue; + + const id = sanitizeText(rawId, 50); + if (!id || seenIds.has(id)) continue; + seenIds.add(id); + + const rawContent = typeof t.content === "string" ? t.content : null; + if (!rawContent) continue; + + const content = sanitizeText(rawContent, 500); + if (!content) continue; + + const status = normalizeTaskStatus(t.status); + if (!status) continue; + + result.push({ id, content, status }); + } + return result.length > 0 ? 
result : null; + } catch { + return null; + } +}; + +type ToolMessage = { + role: "tool"; + tool_call_id: string; + tool_failed?: boolean; + content: string; +}; + +const deriveTasksFromMessages = ( + messages: ChatMessages, + toolMessages: ToolMessage[], +): TodoItem[] => { + const successfulToolIds = new Set( + toolMessages.filter((m) => !m.tool_failed).map((m) => m.tool_call_id), + ); + + for (let i = messages.length - 1; i >= 0; i--) { + const msg = messages[i]; + if (msg.role !== "assistant" || !("tool_calls" in msg) || !msg.tool_calls) + continue; + + for (let j = msg.tool_calls.length - 1; j >= 0; j--) { + const tc = msg.tool_calls[j]; + if (tc.function.name !== "tasks_set" || !tc.id) continue; + if (!successfulToolIds.has(tc.id)) continue; + + const parsed = parseTasksFromArgs(tc.function.arguments); + if (parsed !== null) return parsed; + } + } + + return []; +}; + +describe("normalizeTaskStatus", () => { + it("normalizes standard statuses", () => { + expect(normalizeTaskStatus("pending")).toBe("pending"); + expect(normalizeTaskStatus("in_progress")).toBe("in_progress"); + expect(normalizeTaskStatus("completed")).toBe("completed"); + expect(normalizeTaskStatus("failed")).toBe("failed"); + }); + + it("normalizes aliases", () => { + expect(normalizeTaskStatus("done")).toBe("completed"); + expect(normalizeTaskStatus("complete")).toBe("completed"); + expect(normalizeTaskStatus("inprogress")).toBe("in_progress"); + expect(normalizeTaskStatus("in-progress")).toBe("in_progress"); + expect(normalizeTaskStatus("error")).toBe("failed"); + }); + + it("is case insensitive", () => { + expect(normalizeTaskStatus("PENDING")).toBe("pending"); + expect(normalizeTaskStatus("In_Progress")).toBe("in_progress"); + expect(normalizeTaskStatus("DONE")).toBe("completed"); + }); + + it("returns null for invalid statuses", () => { + expect(normalizeTaskStatus("invalid")).toBe(null); + expect(normalizeTaskStatus("")).toBe(null); + expect(normalizeTaskStatus(123)).toBe(null); + expect(normalizeTaskStatus(null)).toBe(null); + expect(normalizeTaskStatus(undefined)).toBe(null); + }); +}); + +describe("parseTasksFromArgs", () => { + it("parses valid tasks", () => { + const args = JSON.stringify({ + tasks: [ + { id: "1", content: "Task one", status: "pending" }, + { id: "2", content: "Task two", status: "in_progress" }, + ], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "1", content: "Task one", status: "pending" }, + { id: "2", content: "Task two", status: "in_progress" }, + ]); + }); + + it("returns empty array for explicit empty tasks", () => { + const args = JSON.stringify({ tasks: [] }); + expect(parseTasksFromArgs(args)).toEqual([]); + }); + + it("returns null for non-empty but all invalid tasks", () => { + const args = JSON.stringify({ + tasks: [ + { id: "", content: "No id", status: "pending" }, + { id: "2", content: "", status: "pending" }, + { id: "3", content: "Bad status", status: "invalid" }, + ], + }); + expect(parseTasksFromArgs(args)).toBe(null); + }); + + it("filters out invalid items but keeps valid ones", () => { + const args = JSON.stringify({ + tasks: [ + { id: "1", content: "Valid", status: "pending" }, + { id: "", content: "Invalid", status: "pending" }, + { id: "3", content: "Also valid", status: "completed" }, + ], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "1", content: "Valid", status: "pending" }, + { id: "3", content: "Also valid", status: "completed" }, + ]); + }); + + it("trims whitespace from id and content", () => { + const args = JSON.stringify({ + 
tasks: [{ id: " 1 ", content: " Task ", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "1", content: "Task", status: "pending" }, + ]); + }); + + it("rejects whitespace-only id or content", () => { + const args = JSON.stringify({ + tasks: [{ id: " ", content: "Task", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toBe(null); + }); + + it("accepts numeric id", () => { + const args = JSON.stringify({ + tasks: [{ id: 42, content: "Task", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "42", content: "Task", status: "pending" }, + ]); + }); + + it("rejects object id", () => { + const args = JSON.stringify({ + tasks: [{ id: { foo: "bar" }, content: "Task", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toBe(null); + }); + + it("deduplicates tasks with same id", () => { + const args = JSON.stringify({ + tasks: [ + { id: "1", content: "First", status: "pending" }, + { id: "1", content: "Duplicate", status: "completed" }, + { id: "2", content: "Second", status: "pending" }, + ], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "1", content: "First", status: "pending" }, + { id: "2", content: "Second", status: "pending" }, + ]); + }); + + it("strips control characters from content", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Hello\x00\x1FWorld", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "1", content: "HelloWorld", status: "pending" }, + ]); + }); + + it("truncates long content", () => { + const longContent = "x".repeat(600); + const args = JSON.stringify({ + tasks: [{ id: "1", content: longContent, status: "pending" }], + }); + const result = parseTasksFromArgs(args); + expect(result?.[0].content.length).toBe(500); + }); + + it("returns null for malformed JSON", () => { + expect(parseTasksFromArgs("not json")).toBe(null); + expect(parseTasksFromArgs("{incomplete")).toBe(null); + }); + + it("returns null for missing tasks field", () => { + expect(parseTasksFromArgs(JSON.stringify({}))).toBe(null); + expect(parseTasksFromArgs(JSON.stringify({ other: [] }))).toBe(null); + }); + + it("returns null for non-array tasks", () => { + expect(parseTasksFromArgs(JSON.stringify({ tasks: "string" }))).toBe(null); + expect(parseTasksFromArgs(JSON.stringify({ tasks: 123 }))).toBe(null); + }); + + it("truncates long id", () => { + const longId = "x".repeat(100); + const args = JSON.stringify({ + tasks: [{ id: longId, content: "Task", status: "pending" }], + }); + const result = parseTasksFromArgs(args); + expect(result?.[0].id.length).toBe(50); + }); + + it("handles mixed valid and invalid in large batch", () => { + const tasks = [ + { id: "1", content: "Valid 1", status: "pending" }, + { id: "", content: "Empty id", status: "pending" }, + { id: "2", content: "", status: "pending" }, + { id: "3", content: "Valid 3", status: "invalid_status" }, + { id: "4", content: "Valid 4", status: "completed" }, + { id: null, content: "Null id", status: "pending" }, + { id: "5", content: null, status: "pending" }, + { id: "6", content: "Valid 6", status: "in_progress" }, + ]; + const args = JSON.stringify({ tasks }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "1", content: "Valid 1", status: "pending" }, + { id: "4", content: "Valid 4", status: "completed" }, + { id: "6", content: "Valid 6", status: "in_progress" }, + ]); + }); + + it("handles unicode content", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Fix bug 🐛 in 
auth", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "1", content: "Fix bug 🐛 in auth", status: "pending" }, + ]); + }); + + it("strips tabs from content", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Before\tAfter", status: "pending" }], + }); + const result = parseTasksFromArgs(args); + expect(result?.[0].content).toBe("BeforeAfter"); + }); + + it("handles deeply nested invalid structure", () => { + const args = JSON.stringify({ + tasks: [[{ id: "1", content: "Nested", status: "pending" }]], + }); + expect(parseTasksFromArgs(args)).toBe(null); + }); + + it("handles task with extra fields gracefully", () => { + const args = JSON.stringify({ + tasks: [ + { + id: "1", + content: "Task", + status: "pending", + extra: "ignored", + nested: { deep: true }, + }, + ], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "1", content: "Task", status: "pending" }, + ]); + }); + + it("handles numeric id zero", () => { + const args = JSON.stringify({ + tasks: [{ id: 0, content: "Task zero", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "0", content: "Task zero", status: "pending" }, + ]); + }); + + it("handles boolean id by rejecting", () => { + const args = JSON.stringify({ + tasks: [{ id: true, content: "Task", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toBe(null); + }); + + it("handles array id by rejecting", () => { + const args = JSON.stringify({ + tasks: [{ id: [1, 2], content: "Task", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toBe(null); + }); + + it("preserves task order", () => { + const args = JSON.stringify({ + tasks: [ + { id: "c", content: "Third", status: "pending" }, + { id: "a", content: "First", status: "pending" }, + { id: "b", content: "Second", status: "pending" }, + ], + }); + const result = parseTasksFromArgs(args); + expect(result?.map((t) => t.id)).toEqual(["c", "a", "b"]); + }); + + it("handles all statuses", () => { + const args = JSON.stringify({ + tasks: [ + { id: "1", content: "Task 1", status: "pending" }, + { id: "2", content: "Task 2", status: "in_progress" }, + { id: "3", content: "Task 3", status: "completed" }, + { id: "4", content: "Task 4", status: "failed" }, + ], + }); + const result = parseTasksFromArgs(args); + expect(result?.map((t) => t.status)).toEqual([ + "pending", + "in_progress", + "completed", + "failed", + ]); + }); + + it("handles negative numeric id", () => { + const args = JSON.stringify({ + tasks: [{ id: -1, content: "Negative", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "-1", content: "Negative", status: "pending" }, + ]); + }); + + it("handles float numeric id", () => { + const args = JSON.stringify({ + tasks: [{ id: 3.14, content: "Float", status: "pending" }], + }); + expect(parseTasksFromArgs(args)).toEqual([ + { id: "3.14", content: "Float", status: "pending" }, + ]); + }); +}); + +describe("deriveTasksFromMessages", () => { + const makeAssistantMsg = (toolCalls: { id: string; args: string }[]) => ({ + role: "assistant" as const, + content: "Response", + tool_calls: toolCalls.map((tc, index) => ({ + id: tc.id, + index, + type: "function" as const, + function: { name: "tasks_set", arguments: tc.args }, + })), + }); + + const makeToolMsg = (toolCallId: string, failed = false): ToolMessage => ({ + role: "tool", + tool_call_id: toolCallId, + tool_failed: failed, + content: "OK", + }); + + it("returns empty array when no tasks_set calls", () => { + const messages: 
ChatMessages = [ + { role: "user", content: "Hello" }, + { role: "assistant", content: "Hi" }, + ]; + expect(deriveTasksFromMessages(messages, [])).toEqual([]); + }); + + it("ignores tasks_set without tool result", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Task", status: "pending" }], + }); + const messages: ChatMessages = [makeAssistantMsg([{ id: "tc1", args }])]; + expect(deriveTasksFromMessages(messages, [])).toEqual([]); + }); + + it("ignores tasks_set with failed tool result", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Task", status: "pending" }], + }); + const messages: ChatMessages = [makeAssistantMsg([{ id: "tc1", args }])]; + const toolMessages = [makeToolMsg("tc1", true)]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([]); + }); + + it("parses tasks_set with successful tool result", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Task", status: "pending" }], + }); + const messages: ChatMessages = [makeAssistantMsg([{ id: "tc1", args }])]; + const toolMessages = [makeToolMsg("tc1", false)]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([ + { id: "1", content: "Task", status: "pending" }, + ]); + }); + + it("returns last valid tasks_set (backwards scan)", () => { + const args1 = JSON.stringify({ + tasks: [{ id: "1", content: "First", status: "pending" }], + }); + const args2 = JSON.stringify({ + tasks: [{ id: "2", content: "Second", status: "completed" }], + }); + const messages: ChatMessages = [ + makeAssistantMsg([{ id: "tc1", args: args1 }]), + makeAssistantMsg([{ id: "tc2", args: args2 }]), + ]; + const toolMessages = [makeToolMsg("tc1"), makeToolMsg("tc2")]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([ + { id: "2", content: "Second", status: "completed" }, + ]); + }); + + it("skips invalid last tasks_set and uses previous valid one", () => { + const validArgs = JSON.stringify({ + tasks: [{ id: "1", content: "Valid", status: "pending" }], + }); + const invalidArgs = JSON.stringify({ + tasks: [{ id: "", content: "Invalid", status: "pending" }], + }); + const messages: ChatMessages = [ + makeAssistantMsg([{ id: "tc1", args: validArgs }]), + makeAssistantMsg([{ id: "tc2", args: invalidArgs }]), + ]; + const toolMessages = [makeToolMsg("tc1"), makeToolMsg("tc2")]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([ + { id: "1", content: "Valid", status: "pending" }, + ]); + }); + + it("clears tasks when last valid tasks_set has empty array", () => { + const args1 = JSON.stringify({ + tasks: [{ id: "1", content: "Task", status: "pending" }], + }); + const args2 = JSON.stringify({ tasks: [] }); + const messages: ChatMessages = [ + makeAssistantMsg([{ id: "tc1", args: args1 }]), + makeAssistantMsg([{ id: "tc2", args: args2 }]), + ]; + const toolMessages = [makeToolMsg("tc1"), makeToolMsg("tc2")]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([]); + }); + + it("handles tool_failed undefined as success", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Task", status: "pending" }], + }); + const messages: ChatMessages = [makeAssistantMsg([{ id: "tc1", args }])]; + const toolMessages: ToolMessage[] = [ + { role: "tool", tool_call_id: "tc1", content: "OK" }, + ]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([ + { id: "1", content: "Task", status: "pending" }, + ]); + }); + + it("handles multiple tool calls in one assistant message", () => { + const args1 = 
JSON.stringify({ + tasks: [{ id: "1", content: "First", status: "pending" }], + }); + const args2 = JSON.stringify({ + tasks: [{ id: "2", content: "Second", status: "completed" }], + }); + const messages: ChatMessages = [ + makeAssistantMsg([ + { id: "tc1", args: args1 }, + { id: "tc2", args: args2 }, + ]), + ]; + const toolMessages = [makeToolMsg("tc1"), makeToolMsg("tc2")]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([ + { id: "2", content: "Second", status: "completed" }, + ]); + }); + + it("ignores non-tasks_set tool calls", () => { + const messages: ChatMessages = [ + { + role: "assistant", + content: "Response", + tool_calls: [ + { + id: "tc1", + index: 0, + type: "function" as const, + function: { name: "cat", arguments: '{"path":"file.txt"}' }, + }, + ], + }, + ]; + const toolMessages = [makeToolMsg("tc1")]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([]); + }); + + it("handles interleaved user messages", () => { + const args1 = JSON.stringify({ + tasks: [{ id: "1", content: "First", status: "pending" }], + }); + const args2 = JSON.stringify({ + tasks: [{ id: "2", content: "Updated", status: "completed" }], + }); + const messages: ChatMessages = [ + makeAssistantMsg([{ id: "tc1", args: args1 }]), + { role: "user", content: "Continue" }, + makeAssistantMsg([{ id: "tc2", args: args2 }]), + ]; + const toolMessages = [makeToolMsg("tc1"), makeToolMsg("tc2")]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([ + { id: "2", content: "Updated", status: "completed" }, + ]); + }); + + it("handles assistant message without tool_calls", () => { + const messages: ChatMessages = [ + { role: "assistant", content: "Just text response" }, + ]; + expect(deriveTasksFromMessages(messages, [])).toEqual([]); + }); + + it("handles tool call with empty id", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Task", status: "pending" }], + }); + const messages: ChatMessages = [ + { + role: "assistant", + content: "Response", + tool_calls: [ + { + id: "", + index: 0, + type: "function" as const, + function: { name: "tasks_set", arguments: args }, + }, + ], + }, + ]; + const toolMessages = [makeToolMsg("")]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([]); + }); + + it("handles mixed successful and failed tool results", () => { + const args1 = JSON.stringify({ + tasks: [{ id: "1", content: "First", status: "pending" }], + }); + const args2 = JSON.stringify({ + tasks: [{ id: "2", content: "Second", status: "completed" }], + }); + const messages: ChatMessages = [ + makeAssistantMsg([{ id: "tc1", args: args1 }]), + makeAssistantMsg([{ id: "tc2", args: args2 }]), + ]; + const toolMessages = [makeToolMsg("tc1", false), makeToolMsg("tc2", true)]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([ + { id: "1", content: "First", status: "pending" }, + ]); + }); + + it("returns empty when only failed tool results exist", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Task", status: "pending" }], + }); + const messages: ChatMessages = [makeAssistantMsg([{ id: "tc1", args }])]; + const toolMessages = [makeToolMsg("tc1", true)]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([]); + }); + + it("handles large conversation with many tasks_set calls", () => { + const messages: ChatMessages = []; + const toolMessages: ToolMessage[] = []; + + for (let i = 0; i < 10; i++) { + const args = JSON.stringify({ + tasks: [{ id: String(i), content: `Task ${i}`, status: 
"pending" }], + }); + messages.push(makeAssistantMsg([{ id: `tc${i}`, args }])); + toolMessages.push(makeToolMsg(`tc${i}`)); + } + + const result = deriveTasksFromMessages(messages, toolMessages); + expect(result).toEqual([{ id: "9", content: "Task 9", status: "pending" }]); + }); + + it("handles tool result arriving before corresponding assistant message in array", () => { + const args = JSON.stringify({ + tasks: [{ id: "1", content: "Task", status: "pending" }], + }); + const messages: ChatMessages = [makeAssistantMsg([{ id: "tc1", args }])]; + const toolMessages = [makeToolMsg("tc1")]; + expect(deriveTasksFromMessages(messages, toolMessages)).toEqual([ + { id: "1", content: "Task", status: "pending" }, + ]); + }); +}); diff --git a/refact-agent/gui/src/__tests__/useChatSubscription.test.tsx b/refact-agent/gui/src/__tests__/useChatSubscription.test.tsx new file mode 100644 index 000000000..fa3b81e53 --- /dev/null +++ b/refact-agent/gui/src/__tests__/useChatSubscription.test.tsx @@ -0,0 +1,84 @@ +import { describe, it, expect, vi, afterEach } from "vitest"; +import { renderHook } from "@testing-library/react"; +import { Provider } from "react-redux"; +import { configureStore } from "@reduxjs/toolkit"; +import { useChatSubscription } from "../hooks/useChatSubscription"; +import { chatReducer } from "../features/Chat/Thread/reducer"; +import { reducer as configReducer } from "../features/Config/configSlice"; + +const createTestStore = () => { + return configureStore({ + reducer: { + chat: chatReducer, + config: configReducer, + }, + }); +}; + +const wrapper = ({ children }: { children: React.ReactNode }) => ( + {children} +); + +describe("useChatSubscription", () => { + afterEach(() => { + vi.useRealTimers(); + }); + + it("should return disconnected status when disabled", () => { + const { result } = renderHook( + () => useChatSubscription("test-chat", { enabled: false }), + { wrapper }, + ); + + expect(result.current.status).toBe("disconnected"); + expect(result.current.isConnected).toBe(false); + expect(result.current.isConnecting).toBe(false); + }); + + it("should return disconnected status when chatId is null", () => { + const { result } = renderHook( + () => useChatSubscription(null, { enabled: true }), + { wrapper }, + ); + + expect(result.current.status).toBe("disconnected"); + }); + + it("should return disconnected status when chatId is undefined", () => { + const { result } = renderHook( + () => useChatSubscription(undefined, { enabled: true }), + { wrapper }, + ); + + expect(result.current.status).toBe("disconnected"); + }); + + it("should have connect and disconnect functions", () => { + const { result } = renderHook( + () => useChatSubscription("test-chat", { enabled: false }), + { wrapper }, + ); + + expect(typeof result.current.connect).toBe("function"); + expect(typeof result.current.disconnect).toBe("function"); + }); + + it("should have lastSeq as string", () => { + const { result } = renderHook( + () => useChatSubscription("test-chat", { enabled: false }), + { wrapper }, + ); + + expect(typeof result.current.lastSeq).toBe("string"); + expect(result.current.lastSeq).toBe("0"); + }); + + it("should have null error initially", () => { + const { result } = renderHook( + () => useChatSubscription("test-chat", { enabled: false }), + { wrapper }, + ); + + expect(result.current.error).toBeNull(); + }); +}); diff --git a/refact-agent/gui/src/__tests__/userInputProcessing.test.tsx b/refact-agent/gui/src/__tests__/userInputProcessing.test.tsx new file mode 100644 index 
000000000..4c2ffb025 --- /dev/null +++ b/refact-agent/gui/src/__tests__/userInputProcessing.test.tsx @@ -0,0 +1,76 @@ +import { describe, it, expect } from "vitest"; +import * as fs from "fs"; +import * as path from "path"; + +describe("UserInput processing", () => { + it("uses functional array methods (filter/map) for text extraction", () => { + const filePath = path.resolve( + __dirname, + "../components/ChatContent/UserInput.tsx", + ); + const content = fs.readFileSync(filePath, "utf-8"); + + // Current implementation uses filter/map for processing + expect(content).toContain(".filter("); + expect(content).toContain(".map("); + // Should not use recursive patterns that could cause stack overflow + expect(content).not.toMatch( + /function processLines\([^)]*\):[^{]*\{[^}]*return processLines\(/, + ); + }); + + it("uses useMemo for memoized content extraction", () => { + const filePath = path.resolve( + __dirname, + "../components/ChatContent/UserInput.tsx", + ); + const content = fs.readFileSync(filePath, "utf-8"); + + // Current implementation uses useMemo for performance + expect(content).toContain("useMemo"); + expect(content).toContain("textContent"); + }); + + it("extracts images separately from text content", () => { + const filePath = path.resolve( + __dirname, + "../components/ChatContent/UserInput.tsx", + ); + const content = fs.readFileSync(filePath, "utf-8"); + + // Should have separate image extraction logic + expect(content).toContain("images"); + expect(content).toContain("image_url"); + }); +}); + +describe("URL sanitization in AssistantInput", () => { + it("filters citations by URL protocol", () => { + const filePath = path.resolve( + __dirname, + "../components/ChatContent/AssistantInput.tsx", + ); + const content = fs.readFileSync(filePath, "utf-8"); + + expect(content).toContain('url.protocol === "http:"'); + expect(content).toContain('url.protocol === "https:"'); + }); +}); + +describe("DiffTitle uses numeric counts", () => { + it("displays counts instead of repeated characters", () => { + const filePath = path.resolve( + __dirname, + "../components/ChatContent/DiffContent.tsx", + ); + const content = fs.readFileSync(filePath, "utf-8"); + + expect(content).toContain("addCount"); + expect(content).toContain("removeCount"); + const greenIdx = content.indexOf("+{addCount}"); + const redIdx = content.indexOf("-{removeCount}"); + expect(greenIdx).toBeLessThan(redIdx); + expect(content).not.toContain('"+".repeat'); + expect(content).not.toContain('"-".repeat'); + }); +}); diff --git a/refact-agent/gui/src/app/middleware.ts b/refact-agent/gui/src/app/middleware.ts index e8a4b8fe3..62ce906fc 100644 --- a/refact-agent/gui/src/app/middleware.ts +++ b/refact-agent/gui/src/app/middleware.ts @@ -5,28 +5,45 @@ import { isRejected, } from "@reduxjs/toolkit"; import { - doneStreaming, newChatAction, - chatAskQuestionThunk, restoreChat, newIntegrationChat, - setIsWaitingForResponse, - upsertToolCall, - sendCurrentChatToLspAfterToolCallUpdate, - chatResponse, - chatError, + applyChatEvent, + clearThreadPauseReasons, + setThreadConfirmationStatus, + setThreadPauseReasons, + resetThreadImages, + switchToThread, + selectCurrentThreadId, + ideToolRequired, + saveTitle, + setBoostReasoning, + setIncludeProjectInfo, + setContextTokensCap, + setEnabledCheckpoints, + setToolUse, + setChatMode, + setThreadMode, + setChatModel, + setAutoApproveEditingTools, + setAutoApproveDangerousCommands, + setIncreaseMaxTokens, + setAreFollowUpsEnabled, + setSystemPrompt, + setReasoningEffort, + 
setThinkingBudget, + setTemperature, + setMaxTokens, + buildThreadParamsPatch, } from "../features/Chat/Thread"; +import { saveLastThreadParams } from "../utils/threadStorage"; import { statisticsApi } from "../services/refact/statistics"; import { integrationsApi } from "../services/refact/integrations"; import { dockerApi } from "../services/refact/docker"; import { capsApi, isCapsErrorResponse } from "../services/refact/caps"; import { promptsApi } from "../services/refact/prompts"; import { toolsApi } from "../services/refact/tools"; -import { - commandsApi, - isDetailMessage, - isDetailMessageWithErrorType, -} from "../services/refact/commands"; +import { commandsApi, isDetailMessage } from "../services/refact/commands"; import { pathApi } from "../services/refact/path"; import { pingApi } from "../services/refact/ping"; import { @@ -35,20 +52,26 @@ import { setIsAuthError, } from "../features/Errors/errorsSlice"; import { setThemeMode, updateConfig } from "../features/Config/configSlice"; -import { resetAttachedImagesSlice } from "../features/AttachedImages"; import { nextTip } from "../features/TipOfTheDay"; import { telemetryApi } from "../services/refact/telemetry"; +import { tasksApi } from "../services/refact/tasks"; +import { closeTask } from "../features/Tasks/tasksSlice"; +import { closeThread } from "../features/Chat/Thread"; import { CONFIG_PATH_URL, FULL_PATH_URL } from "../services/refact/consts"; -import { - resetConfirmationInteractedState, - updateConfirmationAfterIdeToolUse, -} from "../features/ToolConfirmation/confirmationSlice"; import { ideToolCallResponse, ideForceReloadProjectTreeFiles, + ideTaskDone, + ideAskQuestions, } from "../hooks/useEventBusForIDE"; import { upsertToolCallIntoHistory } from "../features/History/historySlice"; -import { isToolResponse, modelsApi, providersApi } from "../services/refact"; +import { + isToolMessage, + isDiffMessage, + modelsApi, + providersApi, +} from "../services/refact"; +import { sendChatCommand } from "../services/refact/chatCommands"; const AUTH_ERROR_MESSAGE = "There is an issue with your API key. Check out your API Key or re-login"; @@ -60,24 +83,73 @@ const startListening = listenerMiddleware.startListening.withTypes< >(); startListening({ - // TODO: figure out why this breaks the tests when it's not a function :/ - matcher: isAnyOf( - (d: unknown): d is ReturnType => - newChatAction.match(d), - (d: unknown): d is ReturnType => restoreChat.match(d), - ), + actionCreator: newChatAction, + effect: async (_action, listenerApi) => { + const state = listenerApi.getState(); + const chatId = state.chat.current_thread_id; + + [ + statisticsApi.util.resetApiState(), + toolsApi.util.resetApiState(), + commandsApi.util.resetApiState(), + ].forEach((api) => listenerApi.dispatch(api)); + + listenerApi.dispatch(resetThreadImages({ id: chatId })); + listenerApi.dispatch(clearThreadPauseReasons({ id: chatId })); + listenerApi.dispatch( + setThreadConfirmationStatus({ + id: chatId, + wasInteracted: false, + confirmationStatus: true, + }), + ); + listenerApi.dispatch(clearError()); + + // New chats are created client-side first; sync the initial params to backend + // immediately so the first snapshot doesn't overwrite local defaults. + const runtime = state.chat.threads[chatId]; + const port = state.config.lspPort; + if (!runtime || !port || !chatId) return; + + try { + const patch = buildThreadParamsPatch(runtime.thread, true); + + // If reasoning is enabled by defaults (new chat), ensure temperature is sent as null. 
+ // Otherwise backend may fall back to a numeric default (often 0), which is invalid + // for reasoning-enabled providers. + const isReasoningEnabled = + Boolean(runtime.thread.boost_reasoning) || + runtime.thread.reasoning_effort != null || + runtime.thread.thinking_budget != null; + if (isReasoningEnabled) { + patch.temperature = null; + } + + if (Object.keys(patch).length > 0) { + await sendChatCommand(chatId, port, state.config.apiKey ?? undefined, { + type: "set_params", + patch, + }); + } + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: restoreChat, effect: (_action, listenerApi) => { + const state = listenerApi.getState(); + const chatId = state.chat.current_thread_id; + [ - // pingApi.util.resetApiState(), statisticsApi.util.resetApiState(), - // capsApi.util.resetApiState(), - // promptsApi.util.resetApiState(), toolsApi.util.resetApiState(), commandsApi.util.resetApiState(), - resetAttachedImagesSlice(), - resetConfirmationInteractedState(), ].forEach((api) => listenerApi.dispatch(api)); + listenerApi.dispatch(resetThreadImages({ id: chatId })); listenerApi.dispatch(clearError()); }, }); @@ -290,27 +362,23 @@ startListening({ listenerApi.dispatch(setIsAuthError(isAuthError)); } - if ( - chatAskQuestionThunk.rejected.match(action) && - !action.meta.aborted && - typeof action.payload === "string" - ) { - listenerApi.dispatch(setError(action.payload)); - } - if ( (providersApi.endpoints.updateProvider.matchRejected(action) || providersApi.endpoints.getProvider.matchRejected(action) || - providersApi.endpoints.getProviderTemplates.matchRejected(action) || providersApi.endpoints.getConfiguredProviders.matchRejected(action)) && + typeof action.meta === "object" && + "condition" in action.meta && !action.meta.condition ) { - const errorStatus = action.payload?.status; + const payload = action.payload as + | { status?: number; data?: unknown } + | undefined; + const errorStatus = payload?.status; const isAuthError = errorStatus === 401; const message = isAuthError ? AUTH_ERROR_MESSAGE - : isDetailMessage(action.payload?.data) - ? action.payload.data.detail + : isDetailMessage(payload?.data) + ? (payload.data as { detail: string }).detail : `provider update error.`; listenerApi.dispatch(setError(message)); @@ -335,19 +403,21 @@ startListening({ }); startListening({ - actionCreator: updateConfig, + matcher: isAnyOf( + providersApi.endpoints.updateProvider.matchFulfilled, + providersApi.endpoints.oauthExchange.matchFulfilled, + ), effect: (_action, listenerApi) => { - listenerApi.dispatch(pingApi.util.resetApiState()); + listenerApi.dispatch(clearError()); + listenerApi.dispatch(capsApi.util.resetApiState()); + listenerApi.dispatch(modelsApi.util.resetApiState()); }, }); startListening({ - actionCreator: doneStreaming, - effect: (action, listenerApi) => { - const state = listenerApi.getState(); - if (action.payload.id === state.chat.thread.id) { - listenerApi.dispatch(resetAttachedImagesSlice()); - } + actionCreator: updateConfig, + effect: (_action, listenerApi) => { + listenerApi.dispatch(pingApi.util.resetApiState()); }, }); @@ -373,27 +443,9 @@ startListening({ }, }); -startListening({ - actionCreator: newIntegrationChat, - effect: async (_action, listenerApi) => { - const state = listenerApi.getState(); - // TODO: set mode to configure ? or infer it later - // TODO: create a dedicated thunk for this. 
- await listenerApi.dispatch( - chatAskQuestionThunk({ - messages: state.chat.thread.messages, - chatId: state.chat.thread.id, - }), - ); - }, -}); - -// Telemetry +// Telemetry for path API startListening({ matcher: isAnyOf( - chatAskQuestionThunk.rejected.match, - chatAskQuestionThunk.fulfilled.match, - // give files api pathApi.endpoints.getFullPath.matchFulfilled, pathApi.endpoints.getFullPath.matchRejected, pathApi.endpoints.customizationPath.matchFulfilled, @@ -404,48 +456,6 @@ startListening({ pathApi.endpoints.integrationsPath.matchRejected, ), effect: (action, listenerApi) => { - const state = listenerApi.getState(); - if (chatAskQuestionThunk.rejected.match(action) && !action.meta.condition) { - const { chatId, mode } = action.meta.arg; - const thread = - chatId in state.chat.cache - ? state.chat.cache[chatId] - : state.chat.thread; - const scope = `sendChat_${thread.model}_${mode}`; - - if (isDetailMessageWithErrorType(action.payload)) { - const errorMessage = action.payload.detail; - listenerApi.dispatch( - action.payload.errorType === "GLOBAL" - ? setError(errorMessage) - : chatError({ id: chatId, message: errorMessage }), - ); - const thunk = telemetryApi.endpoints.sendTelemetryChatEvent.initiate({ - scope, - success: false, - error_message: errorMessage, - }); - void listenerApi.dispatch(thunk); - } - } - - if (chatAskQuestionThunk.fulfilled.match(action)) { - const { chatId, mode } = action.meta.arg; - const thread = - chatId in state.chat.cache - ? state.chat.cache[chatId] - : state.chat.thread; - const scope = `sendChat_${thread.model}_${mode}`; - - const thunk = telemetryApi.endpoints.sendTelemetryChatEvent.initiate({ - scope, - success: true, - error_message: "", - }); - - void listenerApi.dispatch(thunk); - } - if (pathApi.endpoints.getFullPath.matchFulfilled(action)) { const thunk = telemetryApi.endpoints.sendTelemetryNetEvent.initiate({ url: FULL_PATH_URL, @@ -500,32 +510,35 @@ startListening({ }, }); -// Tool Call results from ide. startListening({ actionCreator: ideToolCallResponse, - effect: (action, listenerApi) => { + effect: async (action, listenerApi) => { const state = listenerApi.getState(); + const chatId = action.payload.chatId; + const { toolCallId, accepted } = action.payload; listenerApi.dispatch(upsertToolCallIntoHistory(action.payload)); - listenerApi.dispatch(upsertToolCall(action.payload)); - listenerApi.dispatch(updateConfirmationAfterIdeToolUse(action.payload)); - const pauseReasons = state.confirmation.pauseReasons.filter( - (reason) => reason.tool_call_id !== action.payload.toolCallId, - ); + const port = state.config.lspPort; + if (!port) return; - if (pauseReasons.length === 0) { - listenerApi.dispatch(resetConfirmationInteractedState()); - listenerApi.dispatch(setIsWaitingForResponse(false)); - } + const apiKey = state.config.apiKey; + const content = + accepted === true + ? "The user accepted the changes." + : accepted === false + ? "The user rejected the changes." + : "The user applied the changes with modifications."; - if (pauseReasons.length === 0 && action.payload.accepted) { - void listenerApi.dispatch( - sendCurrentChatToLspAfterToolCallUpdate({ - chatId: action.payload.chatId, - toolCallId: action.payload.toolCallId, - }), - ); + try { + await sendChatCommand(chatId, port, apiKey ?? 
undefined, { + type: "ide_tool_result", + tool_call_id: toolCallId, + content, + tool_failed: accepted === false, + }); + } catch { + // Silently ignore - backend may not support this command } }, }); @@ -545,15 +558,657 @@ startListening({ }, }); -// JB file refresh -// TBD: this could include diff messages to startListening({ - actionCreator: chatResponse, + actionCreator: setThreadPauseReasons, + effect: (action, listenerApi) => { + const state = listenerApi.getState(); + const currentThreadId = selectCurrentThreadId(state); + const threadIdNeedingConfirmation = action.payload.id; + + if (threadIdNeedingConfirmation !== currentThreadId) { + listenerApi.dispatch(switchToThread({ id: threadIdNeedingConfirmation })); + } + }, +}); + +startListening({ + actionCreator: saveTitle, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.id; + const title = action.payload.title; + const isTitleGenerated = action.payload.isTitleGenerated; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { title, is_title_generated: isTitleGenerated }, + }); + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: applyChatEvent, effect: (action, listenerApi) => { const state = listenerApi.getState(); if (state.config.host !== "jetbrains") return; - if (!isToolResponse(action.payload)) return; if (!window.postIntellijMessage) return; - window.postIntellijMessage(ideForceReloadProjectTreeFiles()); + + const event = action.payload; + if (event.type === "message_added") { + const msg = event.message; + if (isToolMessage(msg) || isDiffMessage(msg)) { + window.postIntellijMessage(ideForceReloadProjectTreeFiles()); + } + } + }, +}); + +startListening({ + actionCreator: applyChatEvent, + effect: (action, listenerApi) => { + const event = action.payload; + if (event.type === "ide_tool_required") { + listenerApi.dispatch( + ideToolRequired({ + chatId: event.chat_id, + toolCallId: event.tool_call_id, + toolName: event.tool_name, + args: event.args, + }), + ); + } + }, +}); + +// Type definitions for tool message content +interface TaskDoneContent { + type: "task_done"; + summary?: string; + knowledge_path?: string; +} + +interface AskQuestionsContent { + type: "ask_questions"; + questions: { id: string; type: string; text: string; options?: string[] }[]; +} + +type ToolMessageContent = + | TaskDoneContent + | AskQuestionsContent + | { type: string }; + +function isTaskDoneContent( + content: ToolMessageContent, +): content is TaskDoneContent { + return content.type === "task_done"; +} + +function isAskQuestionsContent( + content: ToolMessageContent, +): content is AskQuestionsContent { + return ( + content.type === "ask_questions" && + "questions" in content && + Array.isArray(content.questions) + ); +} + +let cachedPostMessage: ((message: Record) => void) | null = + null; + +function getPostMessageForHost(): (message: Record) => void { + if (cachedPostMessage) return cachedPostMessage; + if (window.acquireVsCodeApi) { + cachedPostMessage = window.acquireVsCodeApi().postMessage; + } else if (window.postIntellijMessage) { + cachedPostMessage = window.postIntellijMessage; + } else { + cachedPostMessage = (msg) => window.postMessage(msg, "*"); + } + return 
cachedPostMessage; +} + +function isIdeHost(): boolean { + return !!(window.acquireVsCodeApi ?? window.postIntellijMessage); +} + +function safeParseJson(str: string): unknown { + try { + return JSON.parse(str); + } catch { + return undefined; + } +} + +startListening({ + actionCreator: applyChatEvent, + effect: (action) => { + if (!isIdeHost()) return; + + const event = action.payload; + if (event.type !== "message_added") return; + + const msg = event.message; + if (!isToolMessage(msg)) return; + if (typeof msg.content !== "string") return; + + const parsed = safeParseJson(msg.content); + if (!parsed || typeof parsed !== "object") return; + + const content = parsed as ToolMessageContent; + const chatId = event.chat_id; + const toolCallId = msg.tool_call_id; + const postToIde = getPostMessageForHost(); + + if (isTaskDoneContent(content)) { + postToIde( + ideTaskDone({ + chatId, + toolCallId, + summary: content.summary ?? "Task completed", + knowledgePath: content.knowledge_path, + }), + ); + } else if (isAskQuestionsContent(content)) { + postToIde( + ideAskQuestions({ + chatId, + toolCallId, + questions: content.questions, + }), + ); + } + }, +}); + +// Sync thread params to backend when changed via Redux actions +startListening({ + actionCreator: setBoostReasoning, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { boost_reasoning: action.payload.value }, + }); + + // When reasoning is enabled, temperature must be unset. + // This avoids provider-side validation errors. + if (action.payload.value) { + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { temperature: null }, + }); + } + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: setReasoningEffort, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { reasoning_effort: action.payload.value }, + }); + + // Any explicit reasoning effort implies reasoning mode: unset temperature. + if (action.payload.value != null) { + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { temperature: null }, + }); + } + } catch { + // Silently ignore + } + }, +}); + +startListening({ + actionCreator: setThinkingBudget, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { thinking_budget: action.payload.value }, + }); + + // Any explicit thinking budget implies reasoning mode: unset temperature. 
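The reasoning-related listeners here (boost_reasoning, reasoning_effort, thinking_budget) repeat the same two-step `set_params` sequence: send the changed field, then unset `temperature` when reasoning is implied. A hypothetical consolidation sketch; the helper name `syncReasoningPatch`, its parameter list, and the loose `patch` typing are mine and not part of this patch:

```ts
import { sendChatCommand } from "../services/refact/chatCommands";

// Hypothetical helper, assuming sendChatCommand(chatId, port, apiKey, command)
// as used throughout this middleware: apply a set_params patch and, when the
// patch implies reasoning, follow up by unsetting temperature.
async function syncReasoningPatch(
  chatId: string,
  port: number,
  apiKey: string | undefined,
  patch: Record<string, unknown>,
  impliesReasoning: boolean,
): Promise<void> {
  try {
    await sendChatCommand(chatId, port, apiKey, { type: "set_params", patch });
    if (impliesReasoning) {
      // Reasoning-enabled providers reject an explicit temperature, so unset it.
      await sendChatCommand(chatId, port, apiKey, {
        type: "set_params",
        patch: { temperature: null },
      });
    }
  } catch {
    // Silently ignore - backend may not support this command
  }
}
```

Each listener body would then reduce to a single call such as `syncReasoningPatch(chatId, port, apiKey ?? undefined, { thinking_budget: action.payload.value }, action.payload.value != null)`.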
+ if (action.payload.value != null) { + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { temperature: null }, + }); + } + } catch { + // Silently ignore errors - user will see them via SSE events + } + }, +}); + +startListening({ + actionCreator: setTemperature, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { temperature: action.payload.value }, + }); + } catch { + // Silently ignore errors - user will see them via SSE events + } + }, +}); + +startListening({ + actionCreator: setMaxTokens, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { max_tokens: action.payload.value }, + }); + } catch { + // Silently ignore + } + }, +}); + +startListening({ + actionCreator: setAutoApproveEditingTools, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { auto_approve_editing_tools: action.payload.value }, + }); + } catch { + /* ignore */ + } + }, +}); + +startListening({ + actionCreator: setAutoApproveDangerousCommands, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { auto_approve_dangerous_commands: action.payload.value }, + }); + } catch { + /* ignore */ + } + }, +}); + +startListening({ + actionCreator: setIncludeProjectInfo, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? 
undefined, { + type: "set_params", + patch: { include_project_info: action.payload.value }, + }); + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: setContextTokensCap, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { context_tokens_cap: action.payload.value }, + }); + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: setEnabledCheckpoints, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = state.chat.current_thread_id; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { checkpoints_enabled: action.payload }, + }); + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: setToolUse, + effect: async (_action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = state.chat.current_thread_id; + const runtime = state.chat.threads[chatId]; + + if (!port || !chatId || !runtime) return; + if (runtime.thread.messages.length > 0) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { + tool_use: runtime.thread.tool_use, + mode: runtime.thread.mode, + }, + }); + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: setChatMode, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = state.chat.current_thread_id; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { mode: action.payload }, + }); + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: setThreadMode, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = action.payload.chatId; + const runtime = state.chat.threads[chatId]; + + if (!port || !chatId || !runtime) return; + if (runtime.thread.messages.length > 0) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? 
undefined, { + type: "set_params", + patch: { mode: action.payload.mode }, + }); + } catch { + // Silently ignore - backend may not support this command + } + }, +}); + +startListening({ + actionCreator: setChatModel, + effect: async (action, listenerApi) => { + const state = listenerApi.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + const chatId = state.chat.current_thread_id; + + if (!port || !chatId) return; + + try { + const { sendChatCommand } = await import( + "../services/refact/chatCommands" + ); + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { model: action.payload }, + }); + } catch { + /* ignore */ + } + }, +}); + +startListening({ + matcher: isAnyOf( + setChatModel, + setBoostReasoning, + setReasoningEffort, + setThinkingBudget, + setTemperature, + setMaxTokens, + setIncreaseMaxTokens, + setIncludeProjectInfo, + setContextTokensCap, + setEnabledCheckpoints, + setAreFollowUpsEnabled, + setChatMode, + setThreadMode, + setSystemPrompt, + ), + effect: (_action, listenerApi) => { + const state = listenerApi.getState(); + const chatId = setThreadMode.match(_action) + ? _action.payload.chatId + : state.chat.current_thread_id; + const runtime = state.chat.threads[chatId]; + if (!runtime) return; + + const isUnstartedChat = runtime.thread.messages.length === 0; + const shouldPersistForNewChats = + isUnstartedChat || + setBoostReasoning.match(_action) || + setReasoningEffort.match(_action) || + setThinkingBudget.match(_action); + if (!shouldPersistForNewChats) return; + + // Persist the updated param(s) as defaults for *new* chats. + // IMPORTANT: For started chats, we only persist reasoning-related toggles + // (boost_reasoning / reasoning_effort / thinking_budget), keeping other + // sampling params “sticky” only before the first message. + const mode = runtime.thread.mode; + const patch: Parameters[0] = { mode }; + + if (isUnstartedChat) { + patch.model = runtime.thread.model; + patch.temperature = runtime.thread.temperature; + patch.max_tokens = runtime.thread.max_tokens; + patch.increase_max_tokens = runtime.thread.increase_max_tokens; + patch.include_project_info = runtime.thread.include_project_info; + patch.context_tokens_cap = runtime.thread.context_tokens_cap; + patch.system_prompt = state.chat.system_prompt; + patch.checkpoints_enabled = state.chat.checkpoints_enabled; + patch.follow_ups_enabled = state.chat.follow_ups_enabled; + } + + if (setBoostReasoning.match(_action)) { + patch.boost_reasoning = runtime.thread.boost_reasoning; + // preserve temperature reset as part of “reasoning defaults” + patch.temperature = runtime.thread.temperature; + } + if (setReasoningEffort.match(_action)) { + patch.reasoning_effort = runtime.thread.reasoning_effort; + patch.temperature = runtime.thread.temperature; + } + if (setThinkingBudget.match(_action)) { + patch.thinking_budget = runtime.thread.thinking_budget; + patch.temperature = runtime.thread.temperature; + } + + // Still persist model changes after start (matches current UX). + if (setChatModel.match(_action)) { + patch.model = runtime.thread.model; + } + + saveLastThreadParams(patch); + }, +}); + +// Thread params (model, temperature, etc.) are now sent synchronously +// before the user_message in each submit code path (actions.ts, useChatActions.ts), +// eliminating the race condition where this async listener could fire +// after the user_message had already triggered generation. 
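
As an illustration of the ordering described in the comment above, a minimal sketch (not part of this patch) might look like the following. It reuses the `sendChatCommand(chatId, port, apiKey, command)` helper already imported throughout this middleware; the exact payload shape of the `user_message` command is an assumption made for illustration only.

```typescript
// Illustrative sketch, not part of the diff: submit code paths await the
// set_params patch before enqueueing the user message, so generation can
// never start with stale thread params.
import { sendChatCommand } from "../services/refact/chatCommands";

export async function submitWithParams(
  chatId: string,
  port: number,
  apiKey: string | undefined,
  text: string,
  patch: { model?: string; temperature?: number | null },
) {
  // 1. Persist thread params first and wait for the backend to apply them.
  await sendChatCommand(chatId, port, apiKey, {
    type: "set_params",
    patch,
  });

  // 2. Only then send the user message that triggers generation.
  //    (Payload shape assumed here for illustration.)
  await sendChatCommand(chatId, port, apiKey, {
    type: "user_message",
    content: text,
  });
}
```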
+ +startListening({ + matcher: tasksApi.endpoints.deleteTask.matchFulfilled, + effect: (action, listenerApi) => { + const taskId = action.meta.arg.originalArgs; + const state = listenerApi.getState(); + const threads = state.chat.threads as Record< + string, + | { + thread: { + task_meta?: { task_id: string }; + is_task_chat?: boolean; + id: string; + }; + } + | undefined + >; + + for (const [threadId, runtime] of Object.entries(threads)) { + if (!runtime) continue; + const thread = runtime.thread; + if ( + thread.task_meta?.task_id === taskId || + (thread.is_task_chat && thread.id.includes(taskId)) + ) { + listenerApi.dispatch(closeThread({ id: threadId, force: true })); + } + } + + listenerApi.dispatch(closeTask(taskId)); }, }); diff --git a/refact-agent/gui/src/app/storage.ts b/refact-agent/gui/src/app/storage.ts index 3e4d18558..08d841e09 100644 --- a/refact-agent/gui/src/app/storage.ts +++ b/refact-agent/gui/src/app/storage.ts @@ -1,55 +1,4 @@ import type { WebStorage } from "redux-persist"; -import { - ChatHistoryItem, - HistoryState, -} from "../features/History/historySlice"; -import { parseOrElse } from "../utils"; - -type StoredState = { - tipOfTheDay: string; - tour: string; - history: string; -}; - -function getOldest(history: HistoryState): ChatHistoryItem | null { - const sorted = Object.values(history).sort((a, b) => { - return new Date(a.updatedAt).getTime() - new Date(b.updatedAt).getTime(); - }); - const oldest = sorted[0] ?? null; - return oldest; -} - -function prune(key: string, stored: StoredState) { - const history = parseOrElse(stored.history, {}); - const oldest = getOldest(history); - - if (!oldest) return; - const nextHistory = Object.values(history).reduce( - (acc, cur) => { - if (cur.id === oldest.id) return acc; - return { ...acc, [cur.id]: cur }; - }, - {}, - ); - const nextStorage = { ...stored, history: JSON.stringify(nextHistory) }; - try { - const newHistory = JSON.stringify(nextStorage); - localStorage.setItem(key, newHistory); - } catch (e) { - prune(key, nextStorage); - } -} - -function pruneHistory(key: string, item: string) { - const storedString = item; - if (!storedString) return; - try { - const stored = JSON.parse(storedString) as StoredState; - prune(key, stored); - } catch (e) { - /* empty */ - } -} function removeOldEntry(key: string) { if ( @@ -63,7 +12,6 @@ function removeOldEntry(key: string) { function cleanOldEntries() { if (typeof localStorage === "undefined") return; - removeOldEntry("tour"); removeOldEntry("tipOfTheDay"); removeOldEntry("chatHistory"); } @@ -72,22 +20,22 @@ export function storage(): WebStorage { cleanOldEntries(); return { getItem(key: string): Promise { - return new Promise((resolve, _reject) => { + return new Promise((resolve) => { resolve(localStorage.getItem(key)); }); }, setItem(key: string, item: string): Promise { - return new Promise((resolve, _reject) => { + return new Promise((resolve) => { try { localStorage.setItem(key, item); } catch { - pruneHistory(key, item); + // Storage quota exceeded, ignore } resolve(); }); }, removeItem(key: string): Promise { - return new Promise((resolve, _reject) => { + return new Promise((resolve) => { localStorage.removeItem(key); resolve(); }); diff --git a/refact-agent/gui/src/app/store.ts b/refact-agent/gui/src/app/store.ts index b9a4ee02c..e61ff5ed4 100644 --- a/refact-agent/gui/src/app/store.ts +++ b/refact-agent/gui/src/app/store.ts @@ -1,5 +1,6 @@ import { combineSlices, configureStore } from "@reduxjs/toolkit"; import { storage } from "./storage"; +import { 
pruneStaleDraftMessages } from "../utils/threadStorage"; import { FLUSH, PAUSE, @@ -22,13 +23,19 @@ import { dockerApi, telemetryApi, knowledgeApi, + knowledgeGraphApi, providersApi, modelsApi, teamsApi, + trajectoriesApi, + trajectoryApi, + tasksApi, } from "../services/refact"; +import { chatModesApi } from "../services/refact/chatModes"; +import { customizationApi } from "../services/refact/customization"; +import { projectInformationApi } from "../services/refact/projectInformation"; import { smallCloudApi } from "../services/smallcloud"; import { reducer as fimReducer } from "../features/FIM/reducer"; -import { tourReducer } from "../features/Tour"; import { tipOfTheDaySlice } from "../features/TipOfTheDay"; import { reducer as configReducer } from "../features/Config/configSlice"; import { activeFileReducer } from "../features/Chat/activeFile"; @@ -44,8 +51,6 @@ import { pagesSlice } from "../features/Pages/pagesSlice"; import mergeInitialState from "redux-persist/lib/stateReconciler/autoMergeLevel2"; import { listenerMiddleware } from "./middleware"; import { informationSlice } from "../features/Errors/informationSlice"; -import { confirmationSlice } from "../features/ToolConfirmation/confirmationSlice"; -import { attachedImagesSlice } from "../features/AttachedImages"; import { teamsSlice } from "../features/Teams"; import { userSurveySlice } from "../features/UserSurvey/userSurveySlice"; import { linksApi } from "../services/refact/links"; @@ -55,6 +60,8 @@ import { checkpointsSlice } from "../features/Checkpoints/checkpointsSlice"; import { checkpointsApi } from "../services/refact/checkpoints"; import { patchesAndDiffsTrackerSlice } from "../features/PatchesAndDiffsTracker/patchesAndDiffsTrackerSlice"; import { coinBallanceSlice } from "../features/CoinBalance"; +import { tasksSlice } from "../features/Tasks"; +import { connectionSlice } from "../features/Connection"; const tipOfTheDayPersistConfig = { key: "totd", @@ -72,7 +79,6 @@ const persistedTipOfTheDayReducer = persistReducer< const rootReducer = combineSlices( { fim: fimReducer, - tour: tourReducer, // tipOfTheDay: persistedTipOfTheDayReducer, [tipOfTheDaySlice.reducerPath]: persistedTipOfTheDayReducer, config: configReducer, @@ -92,9 +98,16 @@ const rootReducer = combineSlices( [checkpointsApi.reducerPath]: checkpointsApi.reducer, [telemetryApi.reducerPath]: telemetryApi.reducer, [knowledgeApi.reducerPath]: knowledgeApi.reducer, + [knowledgeGraphApi.reducerPath]: knowledgeGraphApi.reducer, [teamsApi.reducerPath]: teamsApi.reducer, [providersApi.reducerPath]: providersApi.reducer, [modelsApi.reducerPath]: modelsApi.reducer, + [trajectoriesApi.reducerPath]: trajectoriesApi.reducer, + [trajectoryApi.reducerPath]: trajectoryApi.reducer, + [tasksApi.reducerPath]: tasksApi.reducer, + [chatModesApi.reducerPath]: chatModesApi.reducer, + [customizationApi.reducerPath]: customizationApi.reducer, + [projectInformationApi.reducerPath]: projectInformationApi.reducer, }, historySlice, errorSlice, @@ -102,20 +115,20 @@ const rootReducer = combineSlices( pagesSlice, integrationsApi, dockerApi, - confirmationSlice, - attachedImagesSlice, userSurveySlice, teamsSlice, integrationsSlice, checkpointsSlice, patchesAndDiffsTrackerSlice, coinBallanceSlice, + tasksSlice, + connectionSlice, ); const rootPersistConfig = { key: "root", storage: storage(), - whitelist: [historySlice.reducerPath, "tour", userSurveySlice.reducerPath], + whitelist: [userSurveySlice.reducerPath], stateReconciler: mergeInitialState, }; @@ -159,36 +172,43 @@ export 
function setUpStore(preloadedState?: Partial) { }, }); - return ( - middleware - .prepend( - pingApi.middleware, - statisticsApi.middleware, - capsApi.middleware, - promptsApi.middleware, - toolsApi.middleware, - commandsApi.middleware, - smallCloudApi.middleware, - pathApi.middleware, - linksApi.middleware, - integrationsApi.middleware, - dockerApi.middleware, - checkpointsApi.middleware, - telemetryApi.middleware, - knowledgeApi.middleware, - providersApi.middleware, - modelsApi.middleware, - teamsApi.middleware, - ) - .prepend(historyMiddleware.middleware) - // .prepend(errorMiddleware.middleware) - .prepend(listenerMiddleware.middleware) - ); + return middleware + .prepend( + pingApi.middleware, + statisticsApi.middleware, + capsApi.middleware, + promptsApi.middleware, + toolsApi.middleware, + commandsApi.middleware, + smallCloudApi.middleware, + pathApi.middleware, + linksApi.middleware, + integrationsApi.middleware, + dockerApi.middleware, + checkpointsApi.middleware, + telemetryApi.middleware, + knowledgeApi.middleware, + knowledgeGraphApi.middleware, + providersApi.middleware, + modelsApi.middleware, + teamsApi.middleware, + trajectoriesApi.middleware, + trajectoryApi.middleware, + tasksApi.middleware, + chatModesApi.middleware, + customizationApi.middleware, + projectInformationApi.middleware, + ) + .prepend(historyMiddleware.middleware) + .prepend(listenerMiddleware.middleware); }, }); return store; } + +pruneStaleDraftMessages(); + export const store = setUpStore(); export type Store = typeof store; diff --git a/refact-agent/gui/src/components/AtCommands/AtCommandChip.module.css b/refact-agent/gui/src/components/AtCommands/AtCommandChip.module.css new file mode 100644 index 000000000..145772daa --- /dev/null +++ b/refact-agent/gui/src/components/AtCommands/AtCommandChip.module.css @@ -0,0 +1,39 @@ +.chip { + display: inline-flex; + align-items: center; + gap: var(--space-1); + padding: 0 var(--space-2); + background: var(--gray-a3); + border-radius: var(--radius-2); + cursor: pointer; + transition: background 0.15s; + vertical-align: middle; + line-height: var(--line-height-2); + max-width: 200px; +} + +.chip:hover:not(.disabled) { + background: var(--gray-a4); +} + +.chip:focus-visible { + outline: 2px solid var(--accent-8); + outline-offset: 1px; +} + +.disabled { + cursor: default; + opacity: 0.6; +} + +.icon { + font-size: 12px; + flex-shrink: 0; +} + +.label { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + color: var(--gray-11); +} diff --git a/refact-agent/gui/src/components/AtCommands/AtCommandChip.tsx b/refact-agent/gui/src/components/AtCommands/AtCommandChip.tsx new file mode 100644 index 000000000..b48566181 --- /dev/null +++ b/refact-agent/gui/src/components/AtCommands/AtCommandChip.tsx @@ -0,0 +1,47 @@ +import React from "react"; +import { Text } from "@radix-ui/themes"; +import type { ChipDisplayInfo } from "../../utils/atCommands"; +import styles from "./AtCommandChip.module.css"; + +type AtCommandChipProps = { + chip: ChipDisplayInfo; + onClick?: () => void; +}; + +export const AtCommandChip: React.FC = ({ + chip, + onClick, +}) => { + const handleClick = (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + if (!chip.disabled && onClick) { + onClick(); + } + }; + + const handleKeyDown = (e: React.KeyboardEvent) => { + if ((e.key === "Enter" || e.key === " ") && !chip.disabled && onClick) { + e.preventDefault(); + e.stopPropagation(); + onClick(); + } + }; + + return ( + + {chip.icon} + + {chip.label} + + + ); +}; diff 
--git a/refact-agent/gui/src/components/AtCommands/index.ts b/refact-agent/gui/src/components/AtCommands/index.ts new file mode 100644 index 000000000..a6a54a055 --- /dev/null +++ b/refact-agent/gui/src/components/AtCommands/index.ts @@ -0,0 +1 @@ +export { AtCommandChip } from "./AtCommandChip"; diff --git a/refact-agent/gui/src/components/Buttons/Buttons.tsx b/refact-agent/gui/src/components/Buttons/Buttons.tsx index 2cbaa4681..de0002d58 100644 --- a/refact-agent/gui/src/components/Buttons/Buttons.tsx +++ b/refact-agent/gui/src/components/Buttons/Buttons.tsx @@ -1,5 +1,5 @@ import React, { forwardRef, useCallback } from "react"; -import { IconButton, Button, Flex } from "@radix-ui/themes"; +import { IconButton, Button, Flex, HoverCard, Text } from "@radix-ui/themes"; import { PaperPlaneIcon, ExitIcon, @@ -8,6 +8,7 @@ import { } from "@radix-ui/react-icons"; import classNames from "classnames"; import styles from "./button.module.css"; +import iconStyles from "./iconButton.module.css"; import { useOpenUrl } from "../../hooks/useOpenUrl"; import { useAppSelector } from "../../hooks"; import { selectApiKey } from "../../features/Config/configSlice"; @@ -21,13 +22,30 @@ export const PaperPlaneButton: React.FC = (props) => ( ); +type PlainButtonProps = React.ButtonHTMLAttributes; + export const AgentIntegrationsButton = forwardRef< HTMLButtonElement, - IconButtonProps + PlainButtonProps >((props, ref) => ( - - - + + + + + + + Set up Agent Integrations + + + )); AgentIntegrationsButton.displayName = "AgentIntegrationsButton"; @@ -38,10 +56,24 @@ export const ThreadHistoryButton: React.FC = (props) => ( ); -export const BackToSideBarButton: React.FC = (props) => ( - - - +export const BackToSideBarButton: React.FC = (props) => ( + + + + + + + Return to sidebar + + + ); export const CloseButton: React.FC< diff --git a/refact-agent/gui/src/components/Buttons/ContextCapButton.tsx b/refact-agent/gui/src/components/Buttons/ContextCapButton.tsx index f832a28a1..c2472cc57 100644 --- a/refact-agent/gui/src/components/Buttons/ContextCapButton.tsx +++ b/refact-agent/gui/src/components/Buttons/ContextCapButton.tsx @@ -8,7 +8,7 @@ import { } from "../../features/Chat/Thread"; import { Select, type SelectProps } from "../Select"; -import { Skeleton } from "@radix-ui/themes"; +import { Skeleton, HoverCard, Text } from "@radix-ui/themes"; const formatContextSize = (tokens: number): string => { if (tokens >= 1000000) { @@ -132,12 +132,23 @@ export const ContextCapButton: React.FC = () => { const selectKey = `${threadModel}-${maxTokens}`; return ( - + + + + + Context cap + + + ); }; diff --git a/refact-agent/gui/src/components/Buttons/SendButton.tsx b/refact-agent/gui/src/components/Buttons/SendButton.tsx index 3c3dc829a..dd74d1c95 100644 --- a/refact-agent/gui/src/components/Buttons/SendButton.tsx +++ b/refact-agent/gui/src/components/Buttons/SendButton.tsx @@ -1,5 +1,12 @@ import React from "react"; -import { DropdownMenu, IconButton, Flex, Badge } from "@radix-ui/themes"; +import { + DropdownMenu, + IconButton, + Flex, + Badge, + HoverCard, + Text, +} from "@radix-ui/themes"; import { PaperPlaneIcon, CaretDownIcon, @@ -38,19 +45,28 @@ export const SendButtonWithDropdown: React.FC = ({ {queuedCount} )} - { - e.preventDefault(); - onSend(); - }} - > - - + + + { + e.preventDefault(); + onSend(); + }} + > + + + + + + Send message + + +
); } @@ -68,30 +84,39 @@ export const SendButtonWithDropdown: React.FC = ({ {queuedCount} )} - - - - - - - + + + + + + + + + - - onSend()}> - - Queue message - - onSendImmediately()}> - - Send next - - - + + onSend()}> + + Queue message + + onSendImmediately()}> + + Send next + + + + + + + Send options + + + ); }; diff --git a/refact-agent/gui/src/components/Buttons/ThinkingButton.tsx b/refact-agent/gui/src/components/Buttons/ThinkingButton.tsx index f35cdc47e..f0f13e629 100644 --- a/refact-agent/gui/src/components/Buttons/ThinkingButton.tsx +++ b/refact-agent/gui/src/components/Buttons/ThinkingButton.tsx @@ -20,6 +20,7 @@ export const ThinkingButton: React.FC = () => { shouldBeTeasing, noteText, areCapsInitialized, + supportsBoostReasoning, } = useThinking(); const { startPollingForUser } = useStartPollingForUser(); @@ -32,6 +33,10 @@ export const ThinkingButton: React.FC = () => { ); } + if (!supportsBoostReasoning) { + return null; + } + return ( diff --git a/refact-agent/gui/src/components/Buttons/UnifiedSendButton.module.css b/refact-agent/gui/src/components/Buttons/UnifiedSendButton.module.css new file mode 100644 index 000000000..21651fd0c --- /dev/null +++ b/refact-agent/gui/src/components/Buttons/UnifiedSendButton.module.css @@ -0,0 +1,20 @@ +/* Use shared iconButton with variants */ +.iconButton { + composes: iconButton from "../shared/iconButton.module.css"; +} + +.stop { + composes: stop from "../shared/iconButton.module.css"; +} + +.send { + composes: send from "../shared/iconButton.module.css"; +} + +.queue { + composes: queue from "../shared/iconButton.module.css"; +} + +.priority { + composes: priority from "../shared/iconButton.module.css"; +} diff --git a/refact-agent/gui/src/components/Buttons/UnifiedSendButton.tsx b/refact-agent/gui/src/components/Buttons/UnifiedSendButton.tsx new file mode 100644 index 000000000..16c5e4942 --- /dev/null +++ b/refact-agent/gui/src/components/Buttons/UnifiedSendButton.tsx @@ -0,0 +1,249 @@ +import React from "react"; +import { Flex, Badge, HoverCard, Text } from "@radix-ui/themes"; +import { + PaperPlaneIcon, + ClockIcon, + LightningBoltIcon, + StopIcon, +} from "@radix-ui/react-icons"; +import classNames from "classnames"; +import styles from "./UnifiedSendButton.module.css"; + +type UnifiedSendButtonProps = { + disabled?: boolean; + isStreaming?: boolean; + hasText: boolean; + hasMessages: boolean; + queuedCount?: number; + onSend: () => void; + onSendImmediately: () => void; + onStop: () => void; + onResend: () => void; +}; + +const ResendIcon: React.FC = () => { + return ( + + + + ); +}; + +export const UnifiedSendButton: React.FC = ({ + disabled, + isStreaming, + hasText, + hasMessages, + queuedCount = 0, + onSend, + onSendImmediately, + onStop, + onResend, +}) => { + if (isStreaming) { + if (hasText) { + return ( + + {queuedCount > 0 && ( + + + {queuedCount} + + )} + + + + + + + Stop generation + + + + + + + + + + Send immediately (next turn) + + + + + + + + + + Queue message (after tools complete) + + + + + ); + } + + return ( + + {queuedCount > 0 && ( + + + {queuedCount} + + )} + + + + + + + Stop generation + + + + + ); + } + + if (!hasText && hasMessages) { + return ( + + {queuedCount > 0 && ( + + + {queuedCount} + + )} + + + + + + + Resend last messages + + + + + ); + } + + return ( + + {queuedCount > 0 && ( + + + {queuedCount} + + )} + + + + + + + Send message + + + + + ); +}; + +export default UnifiedSendButton; diff --git a/refact-agent/gui/src/components/Buttons/iconButton.module.css 
b/refact-agent/gui/src/components/Buttons/iconButton.module.css new file mode 100644 index 000000000..40ca45739 --- /dev/null +++ b/refact-agent/gui/src/components/Buttons/iconButton.module.css @@ -0,0 +1,4 @@ +/* Re-export shared iconButton for backward compatibility */ +.iconButton { + composes: iconButton from "../shared/iconButton.module.css"; +} diff --git a/refact-agent/gui/src/components/Buttons/index.tsx b/refact-agent/gui/src/components/Buttons/index.tsx index 19e422296..054709fb3 100644 --- a/refact-agent/gui/src/components/Buttons/index.tsx +++ b/refact-agent/gui/src/components/Buttons/index.tsx @@ -12,3 +12,4 @@ export { ThinkingButton } from "./ThinkingButton"; export { ContextCapButton } from "./ContextCapButton"; export { FadedButton } from "./FadedButton"; export { SendButtonWithDropdown } from "./SendButton"; +export { UnifiedSendButton } from "./UnifiedSendButton"; diff --git a/refact-agent/gui/src/components/Callout/Callout.module.css b/refact-agent/gui/src/components/Callout/Callout.module.css index c91435ffc..1a472c4fe 100644 --- a/refact-agent/gui/src/components/Callout/Callout.module.css +++ b/refact-agent/gui/src/components/Callout/Callout.module.css @@ -11,17 +11,15 @@ transform 0.3s ease-in-out, visibility 0.3s ease-in-out, opacity 0.3s ease-in-out; + /* Force GPU compositing to fix JCEF repaint issues in JetBrains IDEs */ + will-change: transform, opacity, visibility; } -.callout_box_background { - background-color: var(--orange-3) !important; - position: absolute; - top: 0; - left: -15px; - right: 0; - bottom: 0; - z-index: -1; - border-radius: 10px; +.callout_box_warning { + --callout-a2: var(--orange-a2); + --callout-a3: var(--orange-a3); + --callout-a9: var(--orange-9); + background-color: var(--orange-3); } .callout_box_opened { diff --git a/refact-agent/gui/src/components/Callout/Callout.tsx b/refact-agent/gui/src/components/Callout/Callout.tsx index 341a1fcd0..485b28d2e 100644 --- a/refact-agent/gui/src/components/Callout/Callout.tsx +++ b/refact-agent/gui/src/components/Callout/Callout.tsx @@ -59,8 +59,10 @@ export const Callout: React.FC = ({ }, []); const handleRetryClick = () => { - // TBD: why was this added, it won't close on click :/? - if (preventClose) return; + if (preventClose) { + onClick(); + return; + } setIsOpened(false); const timeoutId = setTimeout(() => { onClick(); @@ -79,11 +81,11 @@ export const Callout: React.FC = ({ styles.callout_box, { [styles.callout_box_opened]: isOpened, + [styles.callout_box_warning]: type === "warning", }, props.className, )} > - {type === "warning" &&
} {type === "error" ? : } diff --git a/refact-agent/gui/src/components/Chat/Chat.stories.tsx b/refact-agent/gui/src/components/Chat/Chat.stories.tsx index 5fe2aaf3f..dd4b35085 100644 --- a/refact-agent/gui/src/components/Chat/Chat.stories.tsx +++ b/refact-agent/gui/src/components/Chat/Chat.stories.tsx @@ -1,3 +1,4 @@ +/* eslint-disable @typescript-eslint/no-non-null-assertion */ import React from "react"; import type { Meta, StoryObj } from "@storybook/react"; import { Chat } from "./Chat"; @@ -20,9 +21,7 @@ import { goodTools, noTools, // noChatLinks, - makeKnowledgeFromChat, } from "../../__fixtures__/msw"; -import { TourProvider } from "../../features/Tour"; import { Flex } from "@radix-ui/themes"; import { http, HttpResponse } from "msw"; @@ -38,22 +37,36 @@ const Template: React.FC<{ wasSuggested: false, }, }; + const threadId = threadData.id; const store = setUpStore({ - tour: { - type: "finished", - }, chat: { - streaming: false, - prevent_send: false, - waiting_for_response: false, + current_thread_id: threadId, + open_thread_ids: [threadId], + threads: { + [threadId]: { + thread: threadData, + streaming: false, + waiting_for_response: false, + prevent_send: false, + error: null, + queued_items: [], + send_immediately: false, + attached_images: [], + attached_text_files: [], + confirmation: { + pause: false, + pause_reasons: [], + status: { wasInteracted: false, confirmationStatus: true }, + }, + snapshot_received: true, + task_widget_expanded: false, + }, + }, max_new_tokens: 4096, tool_use: "agent", - send_immediately: false, - error: null, - cache: {}, system_prompt: {}, - thread: threadData, - queued_messages: [], + sse_refresh_requested: null, + stream_version: 0, }, config, }); @@ -61,19 +74,17 @@ const Template: React.FC<{ return ( - - - - ({})} - maybeSendToSidebar={() => ({})} - /> - - - + + + ({})} + maybeSendToSidebar={() => ({})} + /> + + ); @@ -105,7 +116,8 @@ export const Primary: Story = {}; export const Configuration: Story = { args: { - thread: CHAT_CONFIG_THREAD.thread, + thread: + CHAT_CONFIG_THREAD.threads[CHAT_CONFIG_THREAD.current_thread_id]!.thread, }, }; @@ -148,7 +160,6 @@ export const Knowledge: Story = { // noChatLinks, chatLinks, noTools, - makeKnowledgeFromChat, ], }, }, @@ -190,7 +201,6 @@ export const EmptySpaceAtBottom: Story = { // noChatLinks, chatLinks, noTools, - makeKnowledgeFromChat, ], }, }, @@ -271,7 +281,6 @@ export const UserMessageEmptySpaceAtBottom: Story = { // noChatLinks, chatLinks, noTools, - makeKnowledgeFromChat, ], }, }, @@ -354,7 +363,6 @@ export const CompressButton: Story = { // noChatLinks, chatLinks, noTools, - makeKnowledgeFromChat, ], }, }, @@ -381,7 +389,6 @@ export const LowBalance: Story = { goodPrompts, chatLinks, noTools, - makeKnowledgeFromChat, lowBalance, }, }, diff --git a/refact-agent/gui/src/components/Chat/Chat.tsx b/refact-agent/gui/src/components/Chat/Chat.tsx index 60cddfc26..e3ed0807a 100644 --- a/refact-agent/gui/src/components/Chat/Chat.tsx +++ b/refact-agent/gui/src/components/Chat/Chat.tsx @@ -1,30 +1,19 @@ import React, { useCallback, useState } from "react"; import { ChatForm, ChatFormProps } from "../ChatForm"; import { ChatContent } from "../ChatContent"; -import { Flex, Button, Text, Card } from "@radix-ui/themes"; -import { - useAppSelector, - useAppDispatch, - useSendChatRequest, - useAutoSend, -} from "../../hooks"; +import { Flex, Button, Card, Container } from "@radix-ui/themes"; +import { useAppSelector, useAppDispatch, useChatActions } from "../../hooks"; import { type Config } from 
"../../features/Config/configSlice"; import { enableSend, selectIsStreaming, selectPreventSend, selectChatId, - selectMessages, - getSelectedToolUse, - selectThreadNewChatSuggested, } from "../../features/Chat/Thread"; -import { ThreadHistoryButton } from "../Buttons"; -import { push } from "../../features/Pages/pagesSlice"; import { DropzoneProvider } from "../Dropzone"; import { useCheckpoints } from "../../hooks/useCheckpoints"; import { Checkpoints } from "../../features/Checkpoints"; -import { SuggestNewChat } from "../ChatForm/SuggestNewChat"; -import { EnhancedModelSelector } from "./EnhancedModelSelector"; +import { TaskProgressWidget } from "../TaskProgressWidget"; export type ChatProps = { host: Config["host"]; @@ -46,23 +35,18 @@ export const Chat: React.FC = ({ const isStreaming = useAppSelector(selectIsStreaming); const chatId = useAppSelector(selectChatId); - const { submit, abort, retryFromIndex } = useSendChatRequest(); - const chatToolUse = useAppSelector(getSelectedToolUse); - const threadNewChatSuggested = useAppSelector(selectThreadNewChatSuggested); - const messages = useAppSelector(selectMessages); + const { submit, abort, retryFromIndex } = useChatActions(); const { shouldCheckpointsPopupBeShown } = useCheckpoints(); - const [isDebugChatHistoryVisible, setIsDebugChatHistoryVisible] = - useState(false); - const preventSend = useAppSelector(selectPreventSend); const onEnableSend = () => dispatch(enableSend({ id: chatId })); const handleSubmit = useCallback( (value: string, sendPolicy?: "immediate" | "after_flow") => { - submit({ question: value, sendPolicy }); + const priority = sendPolicy === "immediate"; + void submit(value, priority); if (isViewingRawJSON) { setIsViewingRawJSON(false); } @@ -70,83 +54,58 @@ export const Chat: React.FC = ({ [submit, isViewingRawJSON], ); - const handleThreadHistoryPage = useCallback(() => { - dispatch(push({ name: "thread history page", chatId })); - }, [chatId, dispatch]); + const handleAbort = useCallback(() => { + void abort(); + }, [abort]); - useAutoSend(); + const handleRetry = useCallback( + (index: number, content: Parameters[1]) => { + void retryFromIndex(index, content); + }, + [retryFromIndex], + ); return ( - - - {shouldCheckpointsPopupBeShown && } - - - {!isStreaming && preventSend && unCalledTools && ( - - - - Chat was interrupted with uncalled tools calls. - - - - - )} - - + + + - - {/* Two flexboxes are left for the future UI element on the right side */} - {messages.length > 0 && ( - - - - - • - - setIsDebugChatHistoryVisible((prev) => !prev)} - style={{ cursor: "pointer" }} - > - mode: {chatToolUse} - - - {messages.length !== 0 && - !isStreaming && - isDebugChatHistoryVisible && ( - - )} + + + + + + {shouldCheckpointsPopupBeShown && } + + {!isStreaming && preventSend && unCalledTools && ( + + + + Chat was interrupted with uncalled tools calls. 
+ + + )} + + + + diff --git a/refact-agent/gui/src/components/Chat/EnhancedModelSelector.tsx b/refact-agent/gui/src/components/Chat/EnhancedModelSelector.tsx index 76fdf3660..ab8e386cb 100644 --- a/refact-agent/gui/src/components/Chat/EnhancedModelSelector.tsx +++ b/refact-agent/gui/src/components/Chat/EnhancedModelSelector.tsx @@ -57,8 +57,9 @@ function extractCapabilitiesFromCaps( supportsMultimodality: capsModel.supports_multimodality, supportsClicks: capsModel.supports_clicks, supportsAgent: capsModel.supports_agent, - supportsReasoning: capsModel.supports_reasoning, - supportsBoostReasoning: capsModel.supports_boost_reasoning, + reasoningEffortOptions: capsModel.reasoning_effort_options, + supportsThinkingBudget: capsModel.supports_thinking_budget, + supportsAdaptiveThinkingBudget: capsModel.supports_adaptive_thinking_budget, }; } diff --git a/refact-agent/gui/src/components/Chat/ModelSelector.tsx b/refact-agent/gui/src/components/Chat/ModelSelector.tsx index 7a67fdefa..e268c7ee2 100644 --- a/refact-agent/gui/src/components/Chat/ModelSelector.tsx +++ b/refact-agent/gui/src/components/Chat/ModelSelector.tsx @@ -1,62 +1,129 @@ import React, { useMemo } from "react"; import { Select, Text, Flex } from "@radix-ui/themes"; import { useCapsForToolUse } from "../../hooks"; +import { useGetCapsQuery } from "../../services/refact/caps"; import { RichModelSelectItem } from "../Select/RichModelSelectItem"; import { enrichAndGroupModels } from "../../utils/enrichModels"; import styles from "../Select/select.module.css"; export type ModelSelectorProps = { disabled?: boolean; + value: string | undefined; + onValueChange: (model: string) => void; + label?: string; + showLabel?: boolean; + compact?: boolean; + defaultValue?: string; }; -export const ModelSelector: React.FC = ({ disabled }) => { +export const ModelSelector: React.FC = ({ + disabled, + value, + onValueChange, + label = "model:", + showLabel = true, + compact = true, + defaultValue, +}) => { const capsForToolUse = useCapsForToolUse(); + const { data: caps } = useGetCapsQuery(undefined); + + const capsData = caps ?? capsForToolUse.data; + + const usableModels = capsForToolUse.usableModelsForPlan; const groupedModels = useMemo( - () => - enrichAndGroupModels( - capsForToolUse.usableModelsForPlan, - capsForToolUse.data, - ), - [capsForToolUse.usableModelsForPlan, capsForToolUse.data], + () => enrichAndGroupModels(usableModels, capsData), + [usableModels, capsData], ); - const currentModelName = capsForToolUse.currentModel.replace(/^refact\//, ""); + const defaultModel = defaultValue ?? capsData?.chat_default_model ?? ""; + const effectiveValue = value ?? defaultModel; + const currentModelName = effectiveValue.replace(/^refact\//, ""); - if (!capsForToolUse.data || groupedModels.length === 0) { + if (!capsData || groupedModels.length === 0) { return ( - - model: {currentModelName} + + {showLabel ? 
`${label} ` : ""} + {currentModelName || "No models"} ); } + if (compact) { + return ( + + {showLabel && ( + + {label} + + )} + + + + {groupedModels.map((group) => ( + + {group.displayName} + {group.models.map((model) => ( + + {model.value} + + + + + ))} + + ))} + + + + ); + } + return ( - - - model: - + + {showLabel && ( + + {label} + + )} - + {groupedModels.map((group) => ( @@ -66,14 +133,12 @@ export const ModelSelector: React.FC = ({ disabled }) => { key={model.value} value={model.value} disabled={model.disabled} - textValue={model.displayName} + textValue={model.value} > - - {model.displayName} - + {model.value} void; + onDelete?: (messageId: string) => void; + contextFilesByToolId?: Record; + diffsByToolId?: Record; usage?: Usage | null; metering_coins_prompt?: number; metering_coins_generated?: number; metering_coins_cache_creation?: number; metering_coins_cache_read?: number; + isStreaming?: boolean; }; -export const AssistantInput: React.FC = ({ +const _AssistantInput: React.FC = ({ message, reasoningContent, + thinkingBlocks, toolCalls, serverExecutedTools, + serverContentBlocks, citations, - isLast, + messageId, + onBranch, + onDelete, + contextFilesByToolId, + diffsByToolId, usage, metering_coins_prompt, metering_coins_generated, metering_coins_cache_creation, metering_coins_cache_read, + isStreaming = false, }) => { const [sendTelemetryEvent] = telemetryApi.useLazySendTelemetryChatEventQuery(); @@ -85,26 +106,49 @@ export const AssistantInput: React.FC = ({ [sendTelemetryEvent], ); - const hasMessageFirst = !reasoningContent && message; + const combinedReasoning = useMemo(() => { + if (reasoningContent) { + return reasoningContent; + } + if (thinkingBlocks && thinkingBlocks.length > 0) { + const thinkingText = thinkingBlocks + .filter((block) => block.thinking) + .map((block) => block.thinking) + .join("\n\n"); + if (thinkingText) { + return thinkingText; + } + } + return null; + }, [reasoningContent, thinkingBlocks]); + + const handleCopyMessage = useCallback(() => { + if (message) { + handleCopy(message); + } + }, [message, handleCopy]); return ( - - - {reasoningContent && ( - + + {combinedReasoning && ( + + + + )} + + {!!serverContentBlocks?.length && ( + + + )} {message && ( - + {message} @@ -126,6 +170,7 @@ export const AssistantInput: React.FC = ({ @@ -136,6 +181,16 @@ export const AssistantInput: React.FC = ({ (citation, idx, arr) => arr.findIndex((c) => c.url === citation.url) === idx, ) + .filter((citation) => { + try { + const url = new URL(citation.url); + return ( + url.protocol === "http:" || url.protocol === "https:" + ); + } catch { + return false; + } + }) .map((citation, idx) => ( = ({ )} - {toolCalls && } - {isLast && ( - - - - - - + + {serverExecutedTools && serverExecutedTools.length > 0 && ( + )} - + + {toolCalls && ( + + )} + + ); }; + +export const AssistantInput = React.memo(_AssistantInput); diff --git a/refact-agent/gui/src/components/ChatContent/ChatContent.module.css b/refact-agent/gui/src/components/ChatContent/ChatContent.module.css index 8a0e328c5..b9957e1d5 100644 --- a/refact-agent/gui/src/components/ChatContent/ChatContent.module.css +++ b/refact-agent/gui/src/components/ChatContent/ChatContent.module.css @@ -1,5 +1,5 @@ .content { - /* max-width: calc(100% - 12px); */ + margin-right: 16px; } .file { @@ -24,25 +24,65 @@ flex-direction: column; gap: 0; text-align: left; - cursor: default; + cursor: pointer; user-select: auto; max-width: calc(100% - var(--space-2) * 4.5); - padding: var(--space-2); + padding: var(--space-2) var(--space-3); 
min-height: var(--base-button-height); height: auto; - white-space: pre; + white-space: pre-wrap; + word-break: break-word; + /* Mimic Button variant="soft" appearance */ + background-color: var(--accent-a3); + border-radius: var(--radius-3); +} + +.userInput:hover { + background-color: var(--accent-a4); +} + +/* Reset Markdown margins inside user messages - override Radix styles */ +.userInput :global(.rt-Text), +.userInput :global(.rt-Heading), +.userInput :global(p), +.userInput :global(h1), +.userInput :global(h2), +.userInput :global(h3), +.userInput :global(h4), +.userInput :global(h5), +.userInput :global(h6), +.userInput :global(ul), +.userInput :global(ol), +.userInput :global(pre) { + margin: 0 !important; + margin-top: 0 !important; + margin-bottom: 0 !important; } -.break_word { - word-break: break-word; +/* Add small spacing between consecutive elements */ +.userInput :global(.rt-Text + .rt-Text), +.userInput :global(.rt-Text + pre), +.userInput :global(pre + .rt-Text), +.userInput :global(p + p), +.userInput :global(p + pre), +.userInput :global(pre + p) { + margin-top: var(--space-1) !important; } -.tool_result { - width: 100%; +/* Proper spacing between list items */ +.userInput :global(li) { + margin-bottom: 2px; +} +.userInput :global(li:last-child) { + margin-bottom: 0; +} + +.break_word { + word-break: break-word; } .diff { - background: #252525; + background: var(--gray-2); max-width: unset; } @@ -88,29 +128,19 @@ DejaVu Sans Mono, Bitstream Vera Sans Mono, Courier New; - color: #c6cdd5; + color: var(--gray-11); } .diff_line_number { min-width: 50px; padding-right: 8px; text-align: end; - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; user-select: none; } .diff_sign { padding-left: 6px; padding-right: 6px; - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; user-select: none; } @@ -118,9 +148,8 @@ flex: 1; } -/** TODO: it seems the background doesn't grow inside of the scroll area */ - .tool_result { + width: 100%; margin-top: var(--space-1); box-sizing: border-box; } @@ -148,20 +177,32 @@ margin-top: 0 !important; } +.queuedMessagesContainer { + position: absolute; + bottom: 60px; + right: 50px; + left: var(--space-2); + padding: var(--space-2); + pointer-events: none; + z-index: 5; +} + +.queuedMessagesContainer > * { + pointer-events: auto; +} + .queuedMessage { - --base-card-surface-box-shadow: 0 0 0 1px - color-mix(in oklab, var(--amber-a5), var(--amber-5) 25%); - background: var(--color-surface); + --base-card-surface-box-shadow: 0 0 0 1px var(--amber-6); + background: color-mix(in oklab, var(--amber-3) 40%, var(--color-surface)); border-radius: var(--radius-2); padding: var(--space-2) var(--space-3); margin-left: auto; max-width: 85%; - opacity: 0.8; } .queuedMessagePriority { - --base-card-surface-box-shadow: 0 0 0 1px - color-mix(in oklab, var(--blue-a5), var(--blue-5) 25%); + --base-card-surface-box-shadow: 0 0 0 1px var(--blue-6); + background: color-mix(in oklab, var(--blue-3) 40%, var(--color-surface)); } .queuedMessageText { @@ -173,3 +214,20 @@ white-space: pre-wrap; word-break: break-word; } + +.plainTextTrigger { + cursor: pointer; + color: var(--gray-10); +} + +.plainTextTrigger:hover { + color: var(--gray-11); +} + +.virtuosoScroller { + composes: scrollbarThin from "../shared/scrollbar.module.css"; + height: 100%; + + /* Keep gutter stable so content doesn't shift when thumb 
appears. */ + scrollbar-gutter: stable; +} diff --git a/refact-agent/gui/src/components/ChatContent/ChatContent.stories.tsx b/refact-agent/gui/src/components/ChatContent/ChatContent.stories.tsx index e37fb28c4..846ca096c 100644 --- a/refact-agent/gui/src/components/ChatContent/ChatContent.stories.tsx +++ b/refact-agent/gui/src/components/ChatContent/ChatContent.stories.tsx @@ -1,3 +1,4 @@ +/* eslint-disable @typescript-eslint/no-non-null-assertion */ import React from "react"; import type { Meta, StoryObj } from "@storybook/react"; import { ChatContent } from "."; @@ -27,7 +28,6 @@ import { goodPing, goodPrompts, goodUser, - makeKnowledgeFromChat, noCommandPreview, noCompletions, noTools, @@ -46,19 +46,36 @@ const MockedStore: React.FC<{ wasSuggested: false, }, }; + const threadId = threadData.id; const store = setUpStore({ chat: { - streaming: false, - prevent_send: false, - waiting_for_response: false, + current_thread_id: threadId, + open_thread_ids: [threadId], + threads: { + [threadId]: { + thread: threadData, + streaming: false, + waiting_for_response: false, + prevent_send: false, + error: null, + queued_items: [], + send_immediately: false, + attached_images: [], + attached_text_files: [], + confirmation: { + pause: false, + pause_reasons: [], + status: { wasInteracted: false, confirmationStatus: true }, + }, + snapshot_received: true, + task_widget_expanded: false, + }, + }, max_new_tokens: 4096, tool_use: "quick", - send_immediately: false, - error: null, - cache: {}, system_prompt: {}, - thread: threadData, - queued_messages: [], + sse_refresh_requested: null, + stream_version: 0, }, }); @@ -147,7 +164,8 @@ export const MultiModal: Story = { export const IntegrationChat: Story = { args: { - thread: CHAT_CONFIG_THREAD.thread, + thread: + CHAT_CONFIG_THREAD.threads[CHAT_CONFIG_THREAD.current_thread_id]!.thread, }, parameters: { msw: { @@ -173,7 +191,7 @@ export const TextDoc: Story = { goodUser, // noChatLinks, noTools, - makeKnowledgeFromChat, + ToolConfirmation, noCompletions, noCommandPreview, @@ -195,7 +213,7 @@ export const MarkdownIssue: Story = { goodUser, // noChatLinks, noTools, - makeKnowledgeFromChat, + ToolConfirmation, noCompletions, noCommandPreview, @@ -237,7 +255,7 @@ export const ToolWaiting: Story = { goodUser, // noChatLinks, noTools, - makeKnowledgeFromChat, + ToolConfirmation, noCompletions, noCommandPreview, diff --git a/refact-agent/gui/src/components/ChatContent/ChatContent.tsx b/refact-agent/gui/src/components/ChatContent/ChatContent.tsx index c77da7238..f1bacc935 100644 --- a/refact-agent/gui/src/components/ChatContent/ChatContent.tsx +++ b/refact-agent/gui/src/components/ChatContent/ChatContent.tsx @@ -1,44 +1,65 @@ -import React, { useCallback, useMemo } from "react"; +import React, { + useCallback, + useMemo, + useEffect, + useState, + useRef, +} from "react"; +import { v4 as uuidv4 } from "uuid"; import { + AssistantMessage, + ChatContextFile, ChatMessages, - isAssistantMessage, + DiffChunk, + DiffMessage, isChatContextFileMessage, isDiffMessage, + isAssistantMessage, isToolMessage, - isUserMessage, + isSystemMessage, UserMessage, } from "../../services/refact"; import { UserInput } from "./UserInput"; -import { ScrollArea, ScrollAreaWithAnchor } from "../ScrollArea"; +import { ScrollArea } from "../ScrollArea"; import { Flex, Container, Button, Box } from "@radix-ui/themes"; import styles from "./ChatContent.module.css"; import { ContextFiles } from "./ContextFiles"; +import { SystemPrompt } from "./SystemPrompt"; import { AssistantInput } from 
"./AssistantInput"; import { PlainText } from "./PlainText"; -import { useAppDispatch, useDiffFileReload } from "../../hooks"; -import { useAppSelector } from "../../hooks"; +import { useAppDispatch, useAppSelector, useDiffFileReload } from "../../hooks"; import { selectIntegration, - selectIsStreaming, - selectIsWaiting, - selectMessages, - selectQueuedMessages, - selectThread, + selectIsStreamingById, + selectIsWaitingById, + selectMessagesById, + selectQueuedItemsById, + selectSnapshotReceivedById, + selectThreadById, + selectChatId, + selectThreadPauseById, } from "../../features/Chat/Thread/selectors"; -import { takeWhile } from "../../utils"; +import { + createChatWithId, + switchToThread, +} from "../../features/Chat/Thread/actions"; import { GroupedDiffs } from "./DiffContent"; import { popBackTo } from "../../features/Pages/pagesSlice"; import { ChatLinks, UncommittedChangesWarning } from "../ChatLinks"; -import { telemetryApi } from "../../services/refact/telemetry"; import { PlaceHolderText } from "./PlaceHolderText"; -import { UsageCounter } from "../UsageCounter"; import { QueuedMessage } from "./QueuedMessage"; -import { - getConfirmationPauseStatus, - getPauseReasonsWithPauseStatus, -} from "../../features/ToolConfirmation/confirmationSlice"; -import { useUsageCounter } from "../UsageCounter/useUsageCounter.ts"; +import { selectSseStatusForChat } from "../../features/Connection"; import { LogoAnimation } from "../LogoAnimation/LogoAnimation.tsx"; +import { ChatLoading } from "./ChatLoading"; +import { + removeMessage, + branchFromChat, +} from "../../services/refact/chatCommands"; +import { selectLspPort, selectApiKey } from "../../features/Config/configSlice"; +import { VirtualizedChatList } from "./VirtualizedChatList"; +import { useCollapsibleState } from "./useCollapsibleState"; +import { useCollapsibleStoreProvider } from "./useCollapsibleStoreProvider"; +import { CollapsibleStoreProvider } from "./useStoredOpen"; export type ChatContentProps = { onRetry: (index: number, question: UserMessage["content"]) => void; @@ -50,95 +71,167 @@ export const ChatContent: React.FC = ({ onRetry, }) => { const dispatch = useAppDispatch(); - const pauseReasonsWithPause = useAppSelector(getPauseReasonsWithPauseStatus); - const messages = useAppSelector(selectMessages); - const queuedMessages = useAppSelector(selectQueuedMessages); - const isStreaming = useAppSelector(selectIsStreaming); - const thread = useAppSelector(selectThread); - const { shouldShow } = useUsageCounter(); - const isConfig = thread.mode === "CONFIGURE"; - const isWaiting = useAppSelector(selectIsWaiting); - const [sendTelemetryEvent] = - telemetryApi.useLazySendTelemetryChatEventQuery(); + const chatId = useAppSelector(selectChatId); + const [renderChatId, setRenderChatId] = useState(chatId); + + useEffect(() => { + if (chatId === renderChatId) return; + const rafId = requestAnimationFrame(() => { + setRenderChatId(chatId); + }); + return () => cancelAnimationFrame(rafId); + }, [chatId, renderChatId]); + + const switching = chatId !== renderChatId; + + const messages = useAppSelector((s) => selectMessagesById(s, renderChatId)); + const queuedItems = useAppSelector((s) => + selectQueuedItemsById(s, renderChatId), + ); + const isStreaming = useAppSelector((s) => + selectIsStreamingById(s, renderChatId), + ); + const snapshotReceived = useAppSelector((s) => + selectSnapshotReceivedById(s, renderChatId), + ); + const thread = useAppSelector((s) => selectThreadById(s, renderChatId)); + const sseStatus = useAppSelector((s) 
=> + selectSseStatusForChat(s, renderChatId), + ); + + const isConfig = thread !== null && thread.mode === "CONFIGURE"; + const isWaiting = useAppSelector((s) => selectIsWaitingById(s, renderChatId)); const integrationMeta = useAppSelector(selectIntegration); - const isWaitingForConfirmation = useAppSelector(getConfirmationPauseStatus); + const isWaitingForConfirmation = useAppSelector((s) => + selectThreadPauseById(s, renderChatId), + ); + const lspPort = useAppSelector(selectLspPort); + const apiKey = useAppSelector(selectApiKey); + + const collapsibleState = useCollapsibleState(false); + const collapsibleStore = useCollapsibleStoreProvider(renderChatId); + const prevChatIdRef = useRef(renderChatId); + const prevDisplayMessagesRef = useRef(null); + const prevDisplayItemsRef = useRef(null); + + useEffect(() => { + if (prevChatIdRef.current !== renderChatId) { + collapsibleState.reset(); + prevDisplayMessagesRef.current = null; + prevDisplayItemsRef.current = null; + prevChatIdRef.current = renderChatId; + } + }, [renderChatId, collapsibleState]); + + const handleBranch = useCallback( + (messageId: string) => { + const newChatId = uuidv4(); + const title = `[branched] ${thread?.title ?? "Chat"}`; + + dispatch( + createChatWithId({ + id: newChatId, + title, + }), + ); - const onRetryWrapper = (index: number, question: UserMessage["content"]) => { - onRetry(index, question); - }; + dispatch(switchToThread({ id: newChatId })); + + void branchFromChat( + newChatId, + renderChatId, + messageId, + lspPort, + apiKey ?? undefined, + ).catch((err) => { + // eslint-disable-next-line no-console + console.error("Failed to branch chat:", err); + }); + }, + [dispatch, thread?.title, renderChatId, lspPort, apiKey], + ); + + const handleDelete = useCallback( + (messageId: string) => { + void removeMessage( + renderChatId, + messageId, + lspPort, + apiKey ?? undefined, + ).catch((err) => { + // eslint-disable-next-line no-console + console.error("Failed to delete message:", err); + }); + }, + [renderChatId, lspPort, apiKey], + ); + + const onRetryWrapper = useCallback( + (index: number, question: UserMessage["content"]) => { + onRetry(index, question); + }, + [onRetry], + ); const handleReturnToConfigurationClick = useCallback(() => { - // console.log(`[DEBUG]: going back to configuration page`); - // TBD: should it be allowed to run in the background? 
onStopStreaming(); dispatch( popBackTo({ name: "integrations page", - projectPath: thread.integration?.project, - integrationName: thread.integration?.name, - integrationPath: thread.integration?.path, + projectPath: thread?.integration?.project, + integrationName: thread?.integration?.name, + integrationPath: thread?.integration?.path, wasOpenedThroughChat: true, }), ); }, [ onStopStreaming, dispatch, - thread.integration?.project, - thread.integration?.name, - thread.integration?.path, + thread?.integration?.project, + thread?.integration?.name, + thread?.integration?.path, ]); - const handleManualStopStreamingClick = useCallback(() => { - onStopStreaming(); - void sendTelemetryEvent({ - scope: `stopStreaming`, - success: true, - error_message: "", - }); - }, [onStopStreaming, sendTelemetryEvent]); - const shouldConfigButtonBeVisible = useMemo(() => { return isConfig && !integrationMeta?.path?.includes("project_summary"); }, [isConfig, integrationMeta?.path]); - // Dedicated hook for handling file reloads useDiffFileReload(); - return ( - - - {messages.length === 0 && ( - - - - )} - {renderMessages(messages, onRetryWrapper, isWaiting)} - {queuedMessages.length > 0 && ( - - {queuedMessages.map((queuedMsg, index) => ( - - ))} - - )} + const showLoading = + switching || + (!snapshotReceived && messages.length === 0) || + (sseStatus === "connecting" && messages.length === 0); + + const displayItems = useMemo(() => { + const prevMessages = prevDisplayMessagesRef.current; + const prevItems = prevDisplayItemsRef.current; + + const incremental = tryIncrementalDisplayItemsUpdate( + prevMessages, + messages, + prevItems, + isStreaming, + ); + + const nextItems = incremental ?? buildDisplayItems(messages, isStreaming); + + prevDisplayMessagesRef.current = messages; + prevDisplayItemsRef.current = nextItems; + + return nextItems; + }, [messages, isStreaming]); + + const initialScrollIndex = useMemo(() => { + return displayItems.length > 0 ? 
displayItems.length - 1 : undefined; + }, [displayItems]); + + const virtuosoFooter = useMemo( + () => ( + <> - {shouldShow && } {!isWaitingForConfirmation && ( = ({ /> )} + + ), + [isStreaming, isWaiting, isWaitingForConfirmation], + ); + + const renderDisplayItem = useCallback( + (item: DisplayItem): React.ReactNode => { + switch (item.type) { + case "plain_text": + return {item.content}</PlainText>; + + case "assistant": + return ( + <AssistantInput + message={item.message.content} + reasoningContent={item.message.reasoning_content} + thinkingBlocks={item.message.thinking_blocks} + toolCalls={item.message.tool_calls} + serverExecutedTools={item.message.server_executed_tools} + serverContentBlocks={item.message.server_content_blocks} + citations={item.message.citations} + messageId={item.message.message_id} + onBranch={handleBranch} + onDelete={handleDelete} + contextFilesByToolId={item.contextFilesByToolId} + diffsByToolId={item.diffsByToolId} + usage={item.message.usage} + metering_coins_prompt={item.message.metering_coins_prompt} + metering_coins_generated={item.message.metering_coins_generated} + metering_coins_cache_creation={ + item.message.metering_coins_cache_creation + } + metering_coins_cache_read={item.message.metering_coins_cache_read} + isStreaming={item.isStreaming} + /> + ); + + case "user": + return ( + <UserInput + onRetry={onRetryWrapper} + messageIndex={item.index} + messageId={item.message.message_id} + checkpoints={item.message.checkpoints} + onBranch={handleBranch} + onDelete={handleDelete} + > + {item.message.content} + </UserInput> + ); + + case "context_files": { + const stateKey = `context_files:${item.toolCallId ?? item.key}`; + return ( + <ContextFiles + files={item.files} + toolCallId={item.toolCallId} + open={collapsibleState.isOpen(stateKey)} + onOpenChange={(open) => collapsibleState.setOpen(stateKey, open)} + /> + ); + } + + case "diff_group": { + const stateKey = `diff_group:${item.key}`; + return ( + <GroupedDiffs + diffs={item.diffs} + open={collapsibleState.isOpen(stateKey)} + onOpenChange={(open) => collapsibleState.setOpen(stateKey, open)} + /> + ); + } + + case "system": + return <SystemPrompt content={item.content} />; + + default: + return null; + } + }, + [handleBranch, handleDelete, onRetryWrapper, collapsibleState], + ); + + if (showLoading) { + return ( + <Flex + direction="column" + className={styles.content} + data-element="ChatContent" + p="2" + gap="1" + style={{ flexGrow: 1, height: "100%" }} + > + <ChatLoading /> </Flex> + ); + } + if (messages.length === 0) { + return ( + <Flex + direction="column" + className={styles.content} + data-element="ChatContent" + p="2" + gap="1" + style={{ flexGrow: 1, height: "100%" }} + > + <Container> + <PlaceHolderText /> + </Container> + </Flex> + ); + } + + return ( + <CollapsibleStoreProvider value={collapsibleStore}> <Box - style={{ - position: "absolute", - bottom: 0, - maxWidth: "100%", // TODO: make space for the down button - }} + style={{ flexGrow: 1, height: "100%", position: "relative" }} + data-element="ChatContent" > - <ScrollArea scrollbars="horizontal"> - <Flex align="start" gap="3" pb="2"> - {(isWaiting || isStreaming) && !pauseReasonsWithPause.pause && ( - <Button - // ml="auto" - color="red" - title="stop streaming" - onClick={handleManualStopStreamingClick} - > - Stop - </Button> - )} - {shouldConfigButtonBeVisible && ( - <Button - // ml="auto" - color="gray" - title="Return to configuration page" - onClick={handleReturnToConfigurationClick} - > - Return - </Button> - )} - - <ChatLinks 
/> - </Flex> - </ScrollArea> + <VirtualizedChatList + key={renderChatId} + items={displayItems} + renderItem={renderDisplayItem} + initialScrollIndex={initialScrollIndex} + footer={virtuosoFooter} + isStreaming={isStreaming} + /> + + <Box + style={{ + position: "absolute", + bottom: 0, + maxWidth: "100%", + }} + > + <ScrollArea scrollbars="horizontal"> + <Flex align="start" gap="3" pb="2"> + {shouldConfigButtonBeVisible && ( + <Button + color="gray" + title="Return to configuration page" + onClick={handleReturnToConfigurationClick} + > + Return + </Button> + )} + <ChatLinks /> + </Flex> + </ScrollArea> + </Box> + + {queuedItems.length > 0 && ( + <Box className={styles.queuedMessagesContainer}> + <Flex direction="column" gap="2" align="end"> + {queuedItems.map((item, index) => ( + <QueuedMessage + key={item.client_request_id} + queuedItem={item} + position={index + 1} + /> + ))} + </Flex> + </Box> + )} </Box> - </ScrollAreaWithAnchor.ScrollArea> + </CollapsibleStoreProvider> ); }; ChatContent.displayName = "ChatContent"; -function renderMessages( - messages: ChatMessages, - onRetry: (index: number, question: UserMessage["content"]) => void, - waiting: boolean, - memo: React.ReactNode[] = [], - index = 0, -) { - if (messages.length === 0) return memo; - const [head, ...tail] = messages; - if (head.role === "tool") { - return renderMessages(tail, onRetry, waiting, memo, index + 1); +function getMessageKey(message: ChatMessages[number], index: number): string { + if (message.message_id) return message.message_id; + if ("tool_call_id" in message && message.tool_call_id) { + return `${message.role}-${message.tool_call_id}-${index}`; } + return `${message.role}-${index}`; +} + +const READ_TOOLS = new Set([ + "cat", + "tree", + "search_pattern", + "search_semantic", + "search_symbol_definition", + "web", + "web_search", + "knowledge", + "search_trajectories", + "get_trajectory_context", +]); + +const EDIT_TOOLS = new Set([ + "create_textdoc", + "update_textdoc", + "replace_textdoc", + "update_textdoc_regex", + "update_textdoc_by_lines", + "update_textdoc_anchored", + "apply_patch", + "undo_textdoc", + "rm", +]); - if (head.role === "plain_text") { - const key = "plain-text-" + index; - const nextMemo = [...memo, <PlainText key={key}>{head.content}</PlainText>]; - return renderMessages(tail, onRetry, waiting, nextMemo, index + 1); +type DisplayItemAssistant = { + type: "assistant"; + key: string; + index: number; + message: AssistantMessage; + contextFilesByToolId: Record<string, ChatContextFile[]>; + diffsByToolId: Record<string, DiffChunk[]>; + isStreaming: boolean; +}; + +type DisplayItemUser = { + type: "user"; + key: string; + index: number; + message: UserMessage; + isLastUser: boolean; +}; + +type DisplayItemContextFiles = { + type: "context_files"; + key: string; + files: ChatContextFile[]; + toolCallId?: string; +}; + +type DisplayItemDiffGroup = { + type: "diff_group"; + key: string; + diffs: DiffMessage[]; +}; + +type DisplayItemSystem = { + type: "system"; + key: string; + content: string; +}; + +type DisplayItemPlainText = { + type: "plain_text"; + key: string; + content: string; +}; + +type DisplayItem = + | DisplayItemAssistant + | DisplayItemUser + | DisplayItemContextFiles + | DisplayItemDiffGroup + | DisplayItemSystem + | DisplayItemPlainText; + +function buildDisplayItems( + messages: ChatMessages, + isStreaming: boolean, +): DisplayItem[] { + const items: DisplayItem[] = []; + if (messages.length === 0) return items; + + const hiddenQaIndices = 
computeHiddenQaMessageIndices(messages); + + let lastUserIdx = -1; + let lastAssistantIdx = -1; + for (let i = messages.length - 1; i >= 0; i--) { + const msg = messages[i]; + if (msg.role === "user" && !hiddenQaIndices.has(i) && lastUserIdx === -1) { + lastUserIdx = i; + } + if (msg.role === "assistant" && lastAssistantIdx === -1) { + lastAssistantIdx = i; + } + if (lastUserIdx !== -1 && lastAssistantIdx !== -1) break; } - if (head.role === "assistant") { - const key = "assistant-input-" + index; - const isLast = !tail.some(isAssistantMessage); - const nextMemo = [ - ...memo, - <AssistantInput - key={key} - message={head.content} - reasoningContent={head.reasoning_content} - toolCalls={head.tool_calls} - serverExecutedTools={head.server_executed_tools} - citations={head.citations} - isLast={isLast} - usage={head.usage} - metering_coins_prompt={head.metering_coins_prompt} - metering_coins_generated={head.metering_coins_generated} - metering_coins_cache_creation={head.metering_coins_cache_creation} - metering_coins_cache_read={head.metering_coins_cache_read} - />, - ]; - - return renderMessages(tail, onRetry, waiting, nextMemo, index + 1); + for (let i = 0; i < messages.length; i++) { + const head = messages[i]; + + if (isToolMessage(head)) continue; + + if (head.role === "plain_text") { + items.push({ + type: "plain_text", + key: getMessageKey(head, i), + content: head.content, + }); + continue; + } + + if (head.role === "assistant") { + const key = getMessageKey(head, i); + const contextFilesAfter: DisplayItemContextFiles[] = []; + const diffMessagesAfter: DiffMessage[] = []; + const contextFilesByToolId: Record<string, ChatContextFile[]> = {}; + const diffsByToolId: Record<string, DiffChunk[]> = {}; + + const toolCalls = head.tool_calls ?? []; + const eligibleToolCalls = toolCalls.filter( + (tc) => tc.id && tc.function.name && READ_TOOLS.has(tc.function.name), + ); + const eligibleToolIds = new Set( + eligibleToolCalls + .map((tc) => tc.id) + .filter((id): id is string => Boolean(id)), + ); + const lastEligibleToolId = + eligibleToolCalls.length > 0 + ? 
eligibleToolCalls[eligibleToolCalls.length - 1].id + : null; + + const editToolCalls = toolCalls.filter( + (tc) => tc.id && tc.function.name && EDIT_TOOLS.has(tc.function.name), + ); + const editToolIds = new Set( + editToolCalls + .map((tc) => tc.id) + .filter((id): id is string => Boolean(id)), + ); + + let j = i + 1; + while (j < messages.length) { + const nextMsg = messages[j]; + + if (isToolMessage(nextMsg)) { + j++; + continue; + } + + if (isChatContextFileMessage(nextMsg)) { + if ( + nextMsg.tool_call_id === "knowledge_enrichment" || + nextMsg.tool_call_id === "project_context" + ) { + break; + } + + let targetToolId: string | null = null; + + if ( + nextMsg.tool_call_id && + eligibleToolIds.has(nextMsg.tool_call_id) + ) { + targetToolId = nextMsg.tool_call_id; + } else if (!nextMsg.tool_call_id && lastEligibleToolId) { + targetToolId = lastEligibleToolId; + } + + if (targetToolId) { + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition + const prev = contextFilesByToolId[targetToolId] || []; + contextFilesByToolId[targetToolId] = [...prev, ...nextMsg.content]; + } else { + contextFilesAfter.push({ + type: "context_files", + key: getMessageKey(nextMsg, j), + files: nextMsg.content, + toolCallId: nextMsg.tool_call_id, + }); + } + j++; + continue; + } + + if (isDiffMessage(nextMsg)) { + if (nextMsg.tool_call_id && editToolIds.has(nextMsg.tool_call_id)) { + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition + const prevDiffs = diffsByToolId[nextMsg.tool_call_id] || []; + diffsByToolId[nextMsg.tool_call_id] = [ + ...prevDiffs, + ...nextMsg.content, + ]; + } else { + diffMessagesAfter.push(nextMsg); + } + j++; + continue; + } + + break; + } + + items.push({ + type: "assistant", + key, + index: i, + message: head, + contextFilesByToolId, + diffsByToolId, + isStreaming: isStreaming && i === lastAssistantIdx, + }); + + for (const ctxItem of contextFilesAfter) { + items.push(ctxItem); + } + + if (diffMessagesAfter.length > 0) { + items.push({ + type: "diff_group", + key: `diffs-${key}`, + diffs: diffMessagesAfter, + }); + } + + i = j - 1; + continue; + } + + if (head.role === "user") { + if (hiddenQaIndices.has(i)) { + continue; + } + + items.push({ + type: "user", + key: getMessageKey(head, i), + index: i, + message: head, + isLastUser: i === lastUserIdx, + }); + continue; + } + + if (isChatContextFileMessage(head)) { + items.push({ + type: "context_files", + key: getMessageKey(head, i), + files: head.content, + toolCallId: head.tool_call_id, + }); + continue; + } + + if (isSystemMessage(head)) { + items.push({ + type: "system", + key: getMessageKey(head, i), + content: head.content, + }); + continue; + } + + if (isDiffMessage(head)) { + const key = getMessageKey(head, i); + const diffs: DiffMessage[] = [head]; + let j = i + 1; + while (j < messages.length) { + const m = messages[j]; + if (isToolMessage(m)) { + j++; + continue; + } + if (isDiffMessage(m)) { + diffs.push(m); + j++; + continue; + } + break; + } + + items.push({ + type: "diff_group", + key: `diffs-${key}`, + diffs, + }); + i = j - 1; + continue; + } } - if (head.role === "user") { - const key = "user-input-" + index; - const isLastUserMessage = !tail.some(isUserMessage); - const nextMemo = [ - ...memo, - isLastUserMessage && ( - <ScrollAreaWithAnchor.ScrollAnchor - key={`${key}-anchor`} - behavior="smooth" - block="start" - // my="-2" - /> - ), - <UserInput onRetry={onRetry} key={key} messageIndex={index}> - {head.content} - </UserInput>, - ]; - return renderMessages(tail, onRetry, 
waiting, nextMemo, index + 1); + return items; +} + +function tryIncrementalDisplayItemsUpdate( + previousMessages: ChatMessages | null, + nextMessages: ChatMessages, + previousItems: DisplayItem[] | null, + isStreaming: boolean, +): DisplayItem[] | null { + if (!previousMessages || !previousItems) return null; + if (previousMessages.length !== nextMessages.length) return null; + + let changedIndex = -1; + for (let i = 0; i < nextMessages.length; i++) { + if (previousMessages[i] !== nextMessages[i]) { + if (changedIndex !== -1) return null; + changedIndex = i; + } } - if (isChatContextFileMessage(head)) { - const key = "context-file-" + index; - const nextMemo = [...memo, <ContextFiles key={key} files={head.content} />]; - return renderMessages(tail, onRetry, waiting, nextMemo, index + 1); + let lastAssistantIdx = -1; + for (let i = nextMessages.length - 1; i >= 0; i--) { + if (nextMessages[i].role === "assistant") { + lastAssistantIdx = i; + break; + } } - if (isDiffMessage(head)) { - const restInTail = takeWhile(tail, (message) => { - return isDiffMessage(message) || isToolMessage(message); + if (changedIndex === -1) { + let needsStreamingPatch = false; + for (const item of previousItems) { + if (item.type !== "assistant") continue; + const shouldStream = isStreaming && item.index === lastAssistantIdx; + if (shouldStream !== item.isStreaming) { + needsStreamingPatch = true; + break; + } + } + if (!needsStreamingPatch) return previousItems; + return previousItems.map((item) => { + if (item.type !== "assistant") return item; + const shouldStream = isStreaming && item.index === lastAssistantIdx; + return shouldStream === item.isStreaming + ? item + : { ...item, isStreaming: shouldStream }; }); + } - const nextTail = tail.slice(restInTail.length); - const diffMessages = [head, ...restInTail.filter(isDiffMessage)]; - const key = "diffs-" + index; + const changedMessage = nextMessages[changedIndex]; + if (changedMessage.role !== "assistant") return null; + if (!isAssistantMessage(changedMessage)) return null; - const nextMemo = [...memo, <GroupedDiffs key={key} diffs={diffMessages} />]; + const nextItems = previousItems.map((item) => { + if (item.type !== "assistant") return item; + if (item.index !== changedIndex) { + const shouldStream = isStreaming && item.index === lastAssistantIdx; + return shouldStream === item.isStreaming + ? 
item + : { + ...item, + isStreaming: shouldStream, + }; + } - return renderMessages( - nextTail, - onRetry, - waiting, - nextMemo, - index + diffMessages.length, - ); + return { + ...item, + message: changedMessage, + isStreaming: isStreaming && item.index === lastAssistantIdx, + }; + }); + + return nextItems; +} + +function extractUserMessageText(content: UserMessage["content"]): string { + if (typeof content === "string") return content; + if (!Array.isArray(content)) return ""; + for (const item of content) { + if ("type" in item && item.type === "text" && "text" in item) { + return item.text; + } + if ("m_type" in item && item.m_type === "text" && "m_content" in item) { + return String(item.m_content); + } + } + return ""; +} + +function computeHiddenQaMessageIndices(messages: ChatMessages): Set<number> { + const hiddenIndices = new Set<number>(); + const askQuestionsToolIds = new Map<string, number>(); + + for (let i = 0; i < messages.length; i++) { + const msg = messages[i]; + if (msg.role === "assistant" && "tool_calls" in msg && msg.tool_calls) { + for (const tc of msg.tool_calls) { + if (tc.function.name === "ask_questions" && tc.id) { + askQuestionsToolIds.set(tc.id, i); + } + } + } + } + + for (const [toolCallId, assistantIdx] of askQuestionsToolIds) { + let foundToolResult = false; + for (let j = assistantIdx + 1; j < messages.length; j++) { + const msg = messages[j]; + if (isToolMessage(msg) && msg.tool_call_id === toolCallId) { + foundToolResult = true; + continue; + } + if (foundToolResult && msg.role === "user") { + const contentStr = extractUserMessageText(msg.content); + if (contentStr.startsWith(`[QA:${toolCallId}]`)) { + hiddenIndices.add(j); + } + break; + } + } } - return renderMessages(tail, onRetry, waiting, memo, index + 1); + return hiddenIndices; } diff --git a/refact-agent/gui/src/components/ChatContent/ChatLoading.module.css b/refact-agent/gui/src/components/ChatContent/ChatLoading.module.css new file mode 100644 index 000000000..98d22c8b9 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ChatLoading.module.css @@ -0,0 +1,80 @@ +.container { + animation: fadeIn 0.15s ease-out; +} + +@keyframes fadeIn { + from { + opacity: 0; + } + to { + opacity: 1; + } +} + +.dotsContainer { + display: flex; + gap: 8px; + align-items: center; + justify-content: center; + height: 40px; +} + +.dot { + width: 12px; + height: 12px; + background-color: var(--red-9); + border-radius: 50%; + animation: bounce 1.4s ease-in-out infinite; +} + +.dot:nth-child(1) { + animation-delay: 0s; +} + +.dot:nth-child(2) { + animation-delay: 0.2s; +} + +.dot:nth-child(3) { + animation-delay: 0.4s; +} + +@keyframes bounce { + 0%, + 80%, + 100% { + transform: scale(0.6); + opacity: 0.5; + } + 40% { + transform: scale(1); + opacity: 1; + } +} + +.skeletonContainer { + width: 100%; + max-width: 400px; +} + +.skeletonLine { + height: 12px; + background: linear-gradient( + 90deg, + var(--gray-a3) 0%, + var(--gray-a5) 50%, + var(--gray-a3) 100% + ); + background-size: 200% 100%; + border-radius: var(--radius-2); + animation: shimmer 1.5s ease-in-out infinite; +} + +@keyframes shimmer { + 0% { + background-position: 200% 0; + } + 100% { + background-position: -200% 0; + } +} diff --git a/refact-agent/gui/src/components/ChatContent/ChatLoading.tsx b/refact-agent/gui/src/components/ChatContent/ChatLoading.tsx new file mode 100644 index 000000000..330187692 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ChatLoading.tsx @@ -0,0 +1,33 @@ +import React from "react"; +import { 
Flex, Container, Box } from "@radix-ui/themes"; +import styles from "./ChatLoading.module.css"; + +export const ChatLoading: React.FC = () => { + return ( + <Container> + <Flex + direction="column" + align="center" + justify="center" + gap="4" + py="9" + className={styles.container} + > + <Box className={styles.dotsContainer}> + <Box className={styles.dot} /> + <Box className={styles.dot} /> + <Box className={styles.dot} /> + </Box> + + <Flex direction="column" gap="3" className={styles.skeletonContainer}> + <Box className={styles.skeletonLine} style={{ width: "85%" }} /> + <Box className={styles.skeletonLine} style={{ width: "70%" }} /> + <Box className={styles.skeletonLine} style={{ width: "90%" }} /> + <Box className={styles.skeletonLine} style={{ width: "60%" }} /> + </Flex> + </Flex> + </Container> + ); +}; + +ChatLoading.displayName = "ChatLoading"; diff --git a/refact-agent/gui/src/components/ChatContent/CollapsibleStore.ts b/refact-agent/gui/src/components/ChatContent/CollapsibleStore.ts new file mode 100644 index 000000000..192720c6c --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/CollapsibleStore.ts @@ -0,0 +1,4 @@ +export type CollapsibleStore = { + get: (key: string) => boolean | undefined; + set: (key: string, open: boolean) => void; +}; diff --git a/refact-agent/gui/src/components/ChatContent/ContextFiles.module.css b/refact-agent/gui/src/components/ChatContent/ContextFiles.module.css new file mode 100644 index 000000000..c3d7e2b99 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ContextFiles.module.css @@ -0,0 +1,133 @@ +.card { + overflow: hidden; +} + +.header { + padding: var(--space-1) 0; + cursor: pointer; + user-select: none; + min-height: 24px; + transition: filter 0.15s ease; +} + +.header:hover { + filter: brightness(1.3); +} + +.icon { + display: flex; + align-items: center; + justify-content: center; + width: 20px; + min-width: 20px; + height: 16px; + flex-shrink: 0; +} + +.icon svg { + width: 14px; + height: 14px; +} + +.summary { + color: inherit; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.contentWrapper { + overflow: hidden; + /* Force GPU layer for JCEF compatibility */ + transform: translateZ(0); + /* CSS-based collapse animation using grid */ + display: grid; + grid-template-rows: 0fr; + opacity: 0; + transition: + grid-template-rows 0.2s ease-out, + opacity 0.15s ease-out; +} + +.contentWrapperOpen { + grid-template-rows: 1fr; + opacity: 1; +} + +.contentInner { + overflow: hidden; +} + +.content { + padding: var(--space-2) 0 var(--space-1); + padding-left: var(--space-4); +} + +.section { + margin-bottom: var(--space-2); +} + +.sectionHeader { + margin-bottom: var(--space-1); +} + +.sectionIcon { + display: flex; + align-items: center; + justify-content: center; + width: 14px; + height: 14px; + color: var(--gray-9); +} + +.sectionIcon svg { + width: 12px; + height: 12px; +} + +.sectionTitle { + color: var(--gray-9); +} + +.sectionContent { + padding-left: var(--space-3); +} + +.fileItem { + overflow: hidden; +} + +.fileHeader { + padding: var(--space-1) 0; + cursor: pointer; + transition: filter 0.15s ease; +} + +.fileHeader:hover { + filter: brightness(1.3); +} + +.fileName { + font-family: var(--code-font-family); + color: var(--accent-11); + cursor: pointer; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.fileName:hover { + text-decoration: underline; +} + +.relevance { + color: var(--gray-9); + flex-shrink: 0; +} + +.fileContent { + composes: scrollbarThin 
from "../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; + margin-left: var(--space-3); +} diff --git a/refact-agent/gui/src/components/ChatContent/ContextFiles.tsx b/refact-agent/gui/src/components/ChatContent/ContextFiles.tsx index 8c348dfdd..6bfabb702 100644 --- a/refact-agent/gui/src/components/ChatContent/ContextFiles.tsx +++ b/refact-agent/gui/src/components/ChatContent/ContextFiles.tsx @@ -1,35 +1,29 @@ -import React from "react"; -import { Flex, Container, Box, HoverCard, Text } from "@radix-ui/themes"; -import styles from "./ChatContent.module.css"; -import { ChatContextFile } from "../../services/refact"; -import classnames from "classnames"; -import { TruncateLeft, Small } from "../Text"; -import * as Collapsible from "@radix-ui/react-collapsible"; - -import { ScrollArea } from "../ScrollArea"; -import { Link } from "../Link"; +import React, { useState, useCallback } from "react"; +import { Flex, Box, Text } from "@radix-ui/themes"; +import classNames from "classnames"; import ReactMarkDown from "react-markdown"; - -import { MarkdownCodeBlock } from "../Markdown/CodeBlock"; -import { Chevron } from "../Collapsible"; +import { + FileIcon, + ArchiveIcon, + ReaderIcon, + LightningBoltIcon, +} from "@radix-ui/react-icons"; +import { ChatContextFile } from "../../services/refact"; +import { ShikiCodeBlock } from "../Markdown/ShikiCodeBlock"; import { filename } from "../../utils"; import { useEventsBusForIDE } from "../../hooks"; +import { useDelayedUnmount } from "../shared/useDelayedUnmount"; +import styles from "./ContextFiles.module.css"; +// Re-export Markdown for backward compatibility export const Markdown: React.FC<{ children: string; - startingLineNumber?: number; -}> = ({ startingLineNumber, ...props }) => { +}> = (props) => { return ( <ReactMarkDown components={{ code({ style: _style, color: _color, ...codeProps }) { - return ( - <MarkdownCodeBlock - {...codeProps} - showLineNumbers={false} - startingLineNumber={startingLineNumber} - /> - ); + return <ShikiCodeBlock {...codeProps} showLineNumbers={false} />; }, }} {...props} @@ -37,137 +31,417 @@ export const Markdown: React.FC<{ ); }; -function getFileInfoFromName(name: string) { +function getExtensionFromName(name: string): string { const dot = name.lastIndexOf("."); + if (dot === -1) return ""; + return name.substring(dot + 1).replace(/:\d*-\d*/, ""); +} + +type ContextVariant = + | "default" + | "enrichment" + | "project_context" + | "memories_context"; + +function isInstructionFile(filePath: string): boolean { + const lower = filePath.toLowerCase(); + return ( + lower.includes("agents.md") || + lower.includes("claude.md") || + lower.includes("gemini.md") || + lower.includes("refact.md") || + lower.includes(".cursorrules") || + lower.includes(".cursor/rules") || + lower.includes("global_rules.md") || + lower.includes(".windsurf/rules") || + lower.includes("copilot-instructions") || + lower.includes(".github/instructions") || + lower.includes(".aider.conf") || + lower.includes(".refact/project_summary") || + lower.includes(".refact/instructions") + ); +} + +function isIdeSettingFile(filePath: string): boolean { + const lower = filePath.toLowerCase(); + return ( + lower.includes(".vscode/") || + lower.includes(".idea/") || + lower.includes(".zed/") || + lower.includes(".fleet/") || + lower.includes(".claude/") + ); +} + +function formatFileName( + filePath: string, + line1?: number, + line2?: number, +): string { + const name = filename(filePath); + if (line1 && line2 && line1 !== 0 && line2 !== 0) 
{ + return `${name}:${line1}-${line2}`; + } + return name; +} - if (dot === -1) - return { - extension: "", - start: 1, - }; - const extendsionAndLines = dot === -1 ? "" : name.substring(dot + 1); - const extension = extendsionAndLines.replace(/:\d*-\d*/, ""); +function extractEnrichmentDisplayName(filePath: string): string { + const fileName = filename(filePath); - if (!/:\d*-\d*/.test(extendsionAndLines)) { - return { extension, start: 1 }; + const memoryMatch = fileName.match( + /^\d{4}-\d{2}-\d{2}_\d{6}_[a-f0-9]+_(.+)\.md$/, + ); + if (memoryMatch) { + return memoryMatch[1].replace(/-/g, " "); } - const lineIndex = extendsionAndLines.lastIndexOf(":"); - const lines = extendsionAndLines.substring(lineIndex + 1); - const [start] = lines.split("-"); - const maybeNumber = Number(start); + const trajectoryMatch = fileName.match(/^[a-f0-9-]{36}\.json$/); + if (trajectoryMatch) { + return "Past conversation"; + } - return { - extension, - start: maybeNumber, - }; + return fileName; } -export const ContextFile: React.FC<{ - name: string; - children: string; - className?: string; - onClick?: React.MouseEventHandler<HTMLAnchorElement> | undefined; -}> = ({ name, onClick, ...props }) => { - const [open, setOpen] = React.useState(false); - const { extension, start } = getFileInfoFromName(name); - const text = "```" + extension + "\n" + props.children + "\n```"; +function extractProjectContextDisplayName(filePath: string): string { + const parts = filePath.split("/"); + + const markers = [ + ".vscode", + ".idea", + ".cursor", + ".windsurf", + ".github", + ".refact", + ".zed", + ".fleet", + ".claude", + ]; + for (let i = 0; i < parts.length; i++) { + if (markers.includes(parts[i])) { + return parts.slice(i).join("/"); + } + } + + const fileName = filename(filePath); + const instructionFiles = [ + "AGENTS.md", + "CLAUDE.md", + "GEMINI.md", + "REFACT.md", + ".cursorrules", + "global_rules.md", + "copilot-instructions.md", + ".aider.conf.yml", + ]; + if ( + instructionFiles.some((f) => fileName.toLowerCase() === f.toLowerCase()) + ) { + return fileName; + } + + if (parts.length >= 2) { + return parts.slice(-2).join("/"); + } + + return fileName; +} + +const FileItem: React.FC<{ + file: ChatContextFile; + onOpenFile: (file: { file_path: string; line?: number }) => Promise<void>; + variant: ContextVariant; +}> = ({ file, onOpenFile, variant }) => { + const [isOpen, setIsOpen] = useState(false); + const { shouldRender, isAnimatingOpen } = useDelayedUnmount(isOpen, 200); + const extension = getExtensionFromName(file.file_name); + + const displayName = + variant === "enrichment" + ? extractEnrichmentDisplayName(file.file_name) + : variant === "project_context" + ? extractProjectContextDisplayName(file.file_name) + : formatFileName(file.file_name, file.line1, file.line2); + + const relevance = file.usefulness ? 
Math.round(file.usefulness) : null; + + const handleToggle = useCallback(() => { + setIsOpen((prev) => !prev); + }, []); + + const handleFileClick = useCallback( + (e: React.MouseEvent) => { + e.stopPropagation(); + void onOpenFile({ + file_path: file.file_name, + line: file.line1, + }); + }, + [onOpenFile, file.file_name, file.line1], + ); + + return ( + <div className={styles.fileItem}> + <Flex + className={styles.fileHeader} + align="center" + gap="2" + onClick={handleToggle} + > + <Text size="1" className={styles.fileName} onClick={handleFileClick}> + {displayName} + </Text> + {relevance !== null && ( + <Text size="1" className={styles.relevance}> + {relevance}% + </Text> + )} + </Flex> + + {shouldRender && ( + <div + className={classNames( + styles.contentWrapper, + isAnimatingOpen && styles.contentWrapperOpen, + )} + > + <div className={styles.contentInner}> + <Box className={styles.fileContent}> + <ShikiCodeBlock showLineNumbers={false}> + {`\`\`\`${extension}\n${file.file_content}\n\`\`\``} + </ShikiCodeBlock> + </Box> + </div> + </div> + )} + </div> + ); +}; + +const FileSection: React.FC<{ + icon: React.ReactNode; + title: string; + files: ChatContextFile[]; + onOpenFile: (file: { file_path: string; line?: number }) => Promise<void>; + variant: ContextVariant; +}> = ({ icon, title, files, onOpenFile, variant }) => { return ( - <Box position="relative"> - <HoverCard.Root onOpenChange={setOpen} open={open}> - <HoverCard.Trigger> - <Box> - <Small className={classnames(styles.file, props.className)}> - &nbsp;&nbsp;&nbsp;&nbsp; - <TruncateLeft> - <Link onClick={onClick}>{name}</Link> - </TruncateLeft> - </Small> - </Box> - </HoverCard.Trigger> - <ScrollArea scrollbars="both" asChild> - <HoverCard.Content - size="1" - maxHeight="50vh" - maxWidth="90vw" - avoidCollisions - > - <Markdown startingLineNumber={start}>{text}</Markdown> - </HoverCard.Content> - </ScrollArea> - </HoverCard.Root> + <Box className={styles.section}> + <Flex align="center" gap="2" className={styles.sectionHeader}> + <span className={styles.sectionIcon}>{icon}</span> + <Text size="1" className={styles.sectionTitle}> + {title} + </Text> + </Flex> + <Flex direction="column" gap="1" className={styles.sectionContent}> + {files.map((file, index) => ( + <FileItem + key={file.file_name + index} + file={file} + onOpenFile={onOpenFile} + variant={variant} + /> + ))} + </Flex> </Box> ); }; -const ContextFilesContent: React.FC<{ +const FilesContent: React.FC<{ files: ChatContextFile[]; onOpenFile: (file: { file_path: string; line?: number }) => Promise<void>; -}> = ({ files, onOpenFile }) => { + variant: ContextVariant; +}> = ({ files, onOpenFile, variant }) => { if (files.length === 0) return null; + if (variant === "enrichment") { + const memories = files.filter((f) => + f.file_name.includes("/.refact/memories/"), + ); + const trajectories = files.filter((f) => + f.file_name.includes("/.refact/trajectories/"), + ); + const other = files.filter( + (f) => + !f.file_name.includes("/.refact/memories/") && + !f.file_name.includes("/.refact/trajectories/"), + ); + + return ( + <Flex direction="column" gap="2"> + {memories.length > 0 && ( + <FileSection + icon={<ReaderIcon />} + title="Knowledge" + files={memories} + onOpenFile={onOpenFile} + variant={variant} + /> + )} + {trajectories.length > 0 && ( + <FileSection + icon={<ArchiveIcon />} + title="Past Conversations" + files={trajectories} + onOpenFile={onOpenFile} + variant={variant} + /> + )} + {other.length > 0 && ( + <FileSection + icon={<FileIcon />} + title="Related" + 
files={other} + onOpenFile={onOpenFile} + variant={variant} + /> + )} + </Flex> + ); + } + + if (variant === "project_context") { + const instructions = files.filter((f) => isInstructionFile(f.file_name)); + const ideSettings = files.filter((f) => isIdeSettingFile(f.file_name)); + const other = files.filter( + (f) => !isInstructionFile(f.file_name) && !isIdeSettingFile(f.file_name), + ); + + return ( + <Flex direction="column" gap="2"> + {instructions.length > 0 && ( + <FileSection + icon={<ReaderIcon />} + title="Instructions" + files={instructions} + onOpenFile={onOpenFile} + variant={variant} + /> + )} + {ideSettings.length > 0 && ( + <FileSection + icon={<ArchiveIcon />} + title="IDE Settings" + files={ideSettings} + onOpenFile={onOpenFile} + variant={variant} + /> + )} + {other.length > 0 && ( + <FileSection + icon={<FileIcon />} + title="Other" + files={other} + onOpenFile={onOpenFile} + variant={variant} + /> + )} + </Flex> + ); + } + return ( - <Container> - <pre style={{ margin: 0 }}> - <Flex wrap="nowrap" direction="column"> - {files.map((file, index) => { - const lineText = - file.line1 && file.line2 && file.line1 !== 0 && file.line2 !== 0 - ? `:${file.line1}-${file.line2}` - : ""; - const key = file.file_name + lineText + index; - return ( - <ContextFile - onClick={(event) => { - event.preventDefault(); - // TODO: this maybe will need to be reworked in the future - // but VSCode handles well file_path to be relative to the actual file_name as file_path - void onOpenFile({ - ...file, - file_path: file.file_name, - }); - }} - key={key} - name={file.file_name + lineText} - > - {file.file_content} - </ContextFile> - ); - })} - </Flex> - </pre> - </Container> + <Flex direction="column" gap="1"> + {files.map((file, index) => ( + <FileItem + key={file.file_name + index} + file={file} + onOpenFile={onOpenFile} + variant={variant} + /> + ))} + </Flex> ); }; -export const ContextFiles: React.FC<{ +const _ContextFiles: React.FC<{ files: ChatContextFile[]; -}> = ({ files }) => { - const [open, setOpen] = React.useState(false); + toolCallId?: string; + open?: boolean; + onOpenChange?: (open: boolean) => void; +}> = ({ files, toolCallId, open: controlledOpen, onOpenChange }) => { + const [internalOpen, setInternalOpen] = useState(false); const { queryPathThenOpenFile } = useEventsBusForIDE(); - if (files.length === 0) return null; + const isControlled = controlledOpen !== undefined; + const isOpen = isControlled ? controlledOpen : internalOpen; + const { shouldRender, isAnimatingOpen } = useDelayedUnmount(isOpen, 200); + + const handleToggle = useCallback(() => { + if (isControlled && onOpenChange) { + onOpenChange(!controlledOpen); + } else { + setInternalOpen((prev) => !prev); + } + }, [isControlled, onOpenChange, controlledOpen]); + + if (!Array.isArray(files) || files.length === 0) return null; + + const variant: ContextVariant = + toolCallId === "knowledge_enrichment" + ? "enrichment" + : toolCallId === "project_context" + ? "project_context" + : toolCallId === "memories_context" + ? "memories_context" + : "default"; - const fileNames = files.map((file) => filename(file.file_name)); + const icon = + variant === "enrichment" ? ( + <LightningBoltIcon /> + ) : variant === "project_context" ? ( + <ArchiveIcon /> + ) : variant === "memories_context" ? ( + <LightningBoltIcon /> + ) : ( + <FileIcon /> + ); + + const label = + variant === "enrichment" + ? `Memories (${files.length})` + : variant === "project_context" + ? `Project context (${files.length})` + : variant === "memories_context" + ? 
`User memories (${files.length})` + : files + .map((f) => formatFileName(f.file_name, f.line1, f.line2)) + .join(", "); return ( - <Container> - <Collapsible.Root open={open} onOpenChange={setOpen}> - <Collapsible.Trigger asChild> - <Flex gap="2" align="start" py="2" style={{ cursor: "pointer" }}> - <Text weight="light" size="1"> - 📎 {fileNames.join(", ")} - </Text> - <Chevron open={open} /> - </Flex> - </Collapsible.Trigger> - <Collapsible.Content> - <ContextFilesContent - files={files} - onOpenFile={queryPathThenOpenFile} - /> - </Collapsible.Content> - </Collapsible.Root> - </Container> + <div className={styles.card}> + <Flex + className={styles.header} + align="center" + gap="2" + onClick={handleToggle} + > + <span className={styles.icon}>{icon}</span> + <Text size="1" className={styles.summary}> + {label} + </Text> + </Flex> + + {shouldRender && ( + <div + className={classNames( + styles.contentWrapper, + isAnimatingOpen && styles.contentWrapperOpen, + )} + > + <div className={styles.contentInner}> + <Box className={styles.content}> + <FilesContent + files={files} + onOpenFile={queryPathThenOpenFile} + variant={variant} + /> + </Box> + </div> + </div> + )} + </div> ); }; + +export const ContextFiles = React.memo(_ContextFiles); diff --git a/refact-agent/gui/src/components/ChatContent/DiffContent.tsx b/refact-agent/gui/src/components/ChatContent/DiffContent.tsx index e7f178b7a..f0f95f8f1 100644 --- a/refact-agent/gui/src/components/ChatContent/DiffContent.tsx +++ b/refact-agent/gui/src/components/ChatContent/DiffContent.tsx @@ -116,139 +116,87 @@ export const DiffTitle: React.FC<{ diffs: Record<string, DiffChunk[]>; }> = ({ diffs }): React.ReactNode[] => { const entries = Object.entries(diffs); + const nodes: React.ReactNode[] = []; - function process( - items: [string, DiffChunk[]][], - memo: React.ReactNode[] = [], - ): React.ReactNode[] { - if (items.length === 0) return memo; - const [head, ...tail] = items; - const [fullPath, diffForFile] = head; + for (const [fullPath, diffForFile] of entries) { const name = filename(fullPath); - // Check if this is a rename action const renameAction = diffForFile.find( (diff) => diff.file_action === "rename" && diff.file_name_rename, ); - const addLength = diffForFile.reduce<number>((acc, diff) => { - return acc + (diff.lines_add ? diff.lines_add.split("\n").length : 0); - }, 0); - const removeLength = diffForFile.reduce<number>((acc, diff) => { - return ( - acc + (diff.lines_remove ? diff.lines_remove.split("\n").length : 0) - ); - }, 0); - const adds = "+".repeat(addLength); - const removes = "-".repeat(removeLength); + let addCount = 0; + let removeCount = 0; + for (const diff of diffForFile) { + if (diff.lines_add) addCount += diff.lines_add.split("\n").length; + if (diff.lines_remove) + removeCount += diff.lines_remove.split("\n").length; + } + + if (nodes.length > 0) { + nodes.push(", "); + } - // Directly return the element based on condition if (renameAction?.file_name_rename) { - // Display rename information const newName = filename(renameAction.file_name_rename); - return process( - tail, - memo.length > 0 - ? 
[ - ...memo, - ", ", - <Text - style={{ display: "inline-block" }} - key={fullPath + "-" + diffForFile.length} - > - {name}{" "} - <Text color="orange" style={{ fontStyle: "italic" }}> - → {newName} - </Text> - </Text>, - ] - : [ - <Text - style={{ display: "inline-block" }} - key={fullPath + "-" + diffForFile.length} - > - {name}{" "} - <Text color="orange" style={{ fontStyle: "italic" }}> - → {newName} - </Text> - </Text>, - ], + nodes.push( + <Text + style={{ display: "inline-block" }} + key={fullPath + "-" + diffForFile.length} + > + {name}{" "} + <Text color="orange" style={{ fontStyle: "italic" }}> + → {newName} + </Text> + </Text>, ); } else { - return process( - tail, - memo.length > 0 - ? [ - ...memo, - ", ", - <Text - style={{ display: "inline-block" }} - key={fullPath + "-" + diffForFile.length} - > - {name}{" "} - <Text - color="red" - wrap="wrap" - style={{ wordBreak: "break-all" }} - > - {removes} - </Text> - <Text - color="green" - wrap="wrap" - style={{ wordBreak: "break-all" }} - > - {adds} - </Text> - </Text>, - ] - : [ - <Text - style={{ display: "inline-block" }} - key={fullPath + "-" + diffForFile.length} - > - {name}{" "} - <Text - color="red" - wrap="wrap" - style={{ wordBreak: "break-all" }} - > - {removes} - </Text> - <Text - color="green" - wrap="wrap" - style={{ wordBreak: "break-all" }} - > - {adds} - </Text> - </Text>, - ], + nodes.push( + <Text + style={{ display: "inline-block" }} + key={fullPath + "-" + diffForFile.length} + > + {name} {addCount > 0 && <Text color="green">+{addCount}</Text>} + {addCount > 0 && removeCount > 0 && " "} + {removeCount > 0 && <Text color="red">-{removeCount}</Text>} + </Text>, ); } - - // const nextMemo = memo.length > 0 ? [...memo, ", ", element] : [element]; - - // return process(tail, nextMemo); } - return process(entries); + return nodes; }; export const DiffContent: React.FC<{ diffs: Record<string, DiffChunk[]>; -}> = ({ diffs }) => { - const [open, setOpen] = React.useState(false); + open?: boolean; + onOpenChange?: (open: boolean) => void; +}> = ({ diffs, open: controlledOpen, onOpenChange }) => { + const [internalOpen, setInternalOpen] = React.useState(false); const ref = useRef<HTMLDivElement>(null); const handleScroll = useHideScroll(ref); + const isControlled = controlledOpen !== undefined; + const open = isControlled ? 
controlledOpen : internalOpen; + + const setOpen = useCallback( + (value: boolean) => { + if (isControlled && onOpenChange) { + onOpenChange(value); + } else { + setInternalOpen(value); + } + }, + [isControlled, onOpenChange], + ); + const handleHide = useCallback(() => { setOpen(false); handleScroll(); - }, [handleScroll]); + }, [handleScroll, setOpen]); return ( - <Collapsible.Root open={open} onOpenChange={setOpen}> + <Collapsible.Root open={open} onOpenChange={(v) => setOpen(v)}> <Collapsible.Trigger asChild> <Flex gap="2" align="center" ref={ref}> <Text weight="light" size="1"> @@ -344,21 +292,33 @@ export const DiffForm: React.FC<{ type GroupedDiffsProps = { diffs: DiffMessage[]; + open?: boolean; + onOpenChange?: (open: boolean) => void; }; -export const GroupedDiffs: React.FC<GroupedDiffsProps> = ({ diffs }) => { - const chunks = diffs.reduce<DiffMessage["content"]>( - (acc, diff) => [...acc, ...diff.content], - [], - ); +const _GroupedDiffs: React.FC<GroupedDiffsProps> = ({ + diffs, + open, + onOpenChange, +}) => { + const chunks: DiffMessage["content"] = []; + for (const diff of diffs) { + chunks.push(...diff.content); + } const groupedByFileName = groupBy(chunks, (chunk) => chunk.file_name); return ( <Container> <Flex direction="column" gap="4" py="4"> - <DiffContent diffs={groupedByFileName} /> + <DiffContent + diffs={groupedByFileName} + open={open} + onOpenChange={onOpenChange} + /> </Flex> </Container> ); }; + +export const GroupedDiffs = React.memo(_GroupedDiffs); diff --git a/refact-agent/gui/src/components/ChatContent/LikeButton.module.css b/refact-agent/gui/src/components/ChatContent/LikeButton.module.css deleted file mode 100644 index 094b72b5b..000000000 --- a/refact-agent/gui/src/components/ChatContent/LikeButton.module.css +++ /dev/null @@ -1,20 +0,0 @@ -.like__button__success { - animation: successAnimation 0.5s ease-in-out; - animation-fill-mode: forwards; -} - -@keyframes successAnimation { - 0% { - transform: scale(1); - color: var(--green-9); - } - 50% { - transform: scale(1.2); - color: var(--yellow-9); - } - 100% { - transform: scale(1); - color: var(--blue-9); - display: none; - } -} diff --git a/refact-agent/gui/src/components/ChatContent/LikeButton.tsx b/refact-agent/gui/src/components/ChatContent/LikeButton.tsx deleted file mode 100644 index 3f17c9bbe..000000000 --- a/refact-agent/gui/src/components/ChatContent/LikeButton.tsx +++ /dev/null @@ -1,76 +0,0 @@ -import React from "react"; -import { IconButton, Tooltip } from "@radix-ui/themes"; -import classnames from "classnames"; -import { knowledgeApi } from "../../services/refact/knowledge"; -import { useAppSelector } from "../../hooks"; -import { - selectIsStreaming, - selectIsWaiting, - selectMessages, -} from "../../features/Chat"; -import styles from "./LikeButton.module.css"; -import { useSelector } from "react-redux"; -import { selectThreadProjectOrCurrentProject } from "../../features/Chat/currentProject"; - -function useCreateMemory() { - const messages = useAppSelector(selectMessages); - const isStreaming = useAppSelector(selectIsStreaming); - const isWaiting = useAppSelector(selectIsWaiting); - const currentProjectName = useSelector(selectThreadProjectOrCurrentProject); - const [saveTrajectory, saveResponse] = - knowledgeApi.useCreateNewMemoryFromMessagesMutation(); - - const submitSave = React.useCallback(() => { - void saveTrajectory({ project: currentProjectName, messages }); - }, [currentProjectName, messages, saveTrajectory]); - - const shouldShow = React.useMemo(() => { - if (messages.length 
=== 0) return false; - if (isStreaming) return false; - if (isWaiting) return false; - return true; - }, [messages.length, isStreaming, isWaiting]); - - return { submitSave, saveResponse, shouldShow }; -} - -export const LikeButton = () => { - const { submitSave, saveResponse, shouldShow } = useCreateMemory(); - - if (!shouldShow) return null; - return ( - <Tooltip content="Save the trajectory overview to memory"> - <IconButton - variant="ghost" - onClick={submitSave} - disabled={saveResponse.isLoading || saveResponse.isSuccess} - loading={saveResponse.isLoading} - size="2" - className={classnames( - saveResponse.isSuccess && styles.like__button__success, - )} - > - <SaveIcon /> - </IconButton> - </Tooltip> - ); -}; - -const SaveIcon: React.FC = () => { - return ( - <svg - height="20" - width="20" - viewBox="0 0 24 24" - fill="none" - xmlns="http://www.w3.org/2000/svg" - > - <path - fill="currentColor" - fillRule="evenodd" - clipRule="evenodd" - d="M17 3H5C3.89 3 3 3.9 3 5V19C3 20.1 3.89 21 5 21H19C20.1 21 21 20.1 21 19V7L17 3ZM19 19H5V5H16.17L19 7.83V19ZM12 12C10.34 12 9 13.34 9 15C9 16.66 10.34 18 12 18C13.66 18 15 16.66 15 15C15 13.34 13.66 12 12 12ZM6 6H15V10H6V6Z" - /> - </svg> - ); -}; diff --git a/refact-agent/gui/src/components/ChatContent/MessageFooter.module.css b/refact-agent/gui/src/components/ChatContent/MessageFooter.module.css new file mode 100644 index 000000000..46974d964 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/MessageFooter.module.css @@ -0,0 +1,57 @@ +.messageWrapper { + position: relative; + min-width: 0; +} + +.footerLane { + height: 28px; + display: flex; + align-items: center; +} + +.footerContent { + display: flex; + align-items: center; + gap: var(--space-2); + opacity: 0; + visibility: hidden; + transition: + opacity 0.15s ease, + visibility 0.15s ease; +} + +.messageWrapper:hover .footerContent, +.messageWrapper:focus-within .footerContent { + opacity: 1; + visibility: visible; +} + +/* Footer item - matches tool card icon style */ +.footerItem { + display: inline-flex; + align-items: center; + gap: 4px; + cursor: pointer; + color: var(--gray-10); + font-size: var(--font-size-1); + transition: filter 0.15s ease; + padding: var(--space-1) 0; +} + +.footerItem:hover { + filter: brightness(1.3); +} + +.footerItem svg { + width: 14px; + height: 14px; + flex-shrink: 0; +} + +.footerItemDanger { + color: var(--red-9); +} + +.footerItemDanger:hover { + filter: brightness(1.3); +} diff --git a/refact-agent/gui/src/components/ChatContent/MessageFooter.tsx b/refact-agent/gui/src/components/ChatContent/MessageFooter.tsx new file mode 100644 index 000000000..5c8551a2a --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/MessageFooter.tsx @@ -0,0 +1,300 @@ +import React, { useCallback } from "react"; +import { Flex, Text, HoverCard, Box, Tooltip } from "@radix-ui/themes"; +import { + CopyIcon, + Share1Icon, + TrashIcon, + BarChartIcon, +} from "@radix-ui/react-icons"; +import { Usage } from "../../services/refact"; +import { Checkpoint } from "../../features/Checkpoints/types"; +import { formatNumberToFixed } from "../../utils/formatNumberToFixed"; +import { calculateUsageInputTokens } from "../../utils/calculateUsageInputTokens"; +import { formatUsd } from "../../utils/getMetering"; +import { Coin } from "../../images"; +import { CheckpointButton } from "../../features/Checkpoints"; +import styles from "./MessageFooter.module.css"; + +type MessageFooterProps = { + messageId?: string; + onCopy?: () => void; + onBranch?: (messageId: string) 
=> void; + onDelete?: (messageId: string) => void; + usage?: Usage | null; + metering_coins_prompt?: number; + metering_coins_generated?: number; + metering_coins_cache_creation?: number; + metering_coins_cache_read?: number; + // For user messages with checkpoints + checkpoints?: Checkpoint[] | null; + messageIndex?: number; +}; + +const TokenDisplay: React.FC<{ label: string; value: number }> = ({ + label, + value, +}) => ( + <Flex align="center" justify="between" width="100%" gap="4"> + <Text size="1" weight="bold"> + {label} + </Text> + <Text size="1">{formatNumberToFixed(value)}</Text> + </Flex> +); + +const CoinDisplay: React.FC<{ label: string; value: number }> = ({ + label, + value, +}) => ( + <Flex align="center" justify="between" width="100%" gap="4"> + <Text size="1" weight="bold"> + {label} + </Text> + <Text size="1"> + <Flex align="center" gap="2"> + {Math.round(value)} <Coin width="12px" height="12px" /> + </Flex> + </Text> + </Flex> +); + +const UsdDisplay: React.FC<{ label: string; value: number | undefined }> = ({ + label, + value, +}) => ( + <Flex align="center" justify="between" width="100%" gap="4"> + <Text size="1" weight="bold"> + {label} + </Text> + <Text size="1">{formatUsd(value)}</Text> + </Flex> +); + +export const MessageFooter: React.FC<MessageFooterProps> = ({ + messageId, + onCopy, + onBranch, + onDelete, + usage, + metering_coins_prompt = 0, + metering_coins_generated = 0, + metering_coins_cache_creation = 0, + metering_coins_cache_read = 0, + checkpoints, + messageIndex, +}) => { + const handleBranch = useCallback(() => { + if (messageId && onBranch) { + onBranch(messageId); + } + }, [messageId, onBranch]); + + const handleDelete = useCallback(() => { + if (messageId && onDelete) { + onDelete(messageId); + } + }, [messageId, onDelete]); + + const outputTokens = calculateUsageInputTokens({ + usage, + keys: ["completion_tokens"], + }); + + const totalCoins = + metering_coins_prompt + + metering_coins_generated + + metering_coins_cache_creation + + metering_coins_cache_read; + + const meteringUsd = usage?.metering_usd; + const hasUsd = meteringUsd !== undefined && meteringUsd.total_usd > 0; + const showCoins = !hasUsd && totalCoins > 0; + + const contextTokens = calculateUsageInputTokens({ + usage, + keys: [ + "prompt_tokens", + "cache_creation_input_tokens", + "cache_read_input_tokens", + ], + }); + const hasUsageInfo = + Boolean(usage && contextTokens > 0) || showCoins || hasUsd; + + return ( + <div className={styles.footerLane}> + <div className={styles.footerContent}> + {/* Checkpoints button (for user messages) */} + {checkpoints && + checkpoints.length > 0 && + messageIndex !== undefined && ( + <CheckpointButton + checkpoints={checkpoints} + messageIndex={messageIndex} + /> + )} + {/* Action buttons - styled like tool card icons */} + {onCopy && ( + <Tooltip content="Copy message"> + <div className={styles.footerItem} onClick={onCopy}> + <CopyIcon /> + </div> + </Tooltip> + )} + {onBranch && messageId && ( + <Tooltip content="Branch from here"> + <div className={styles.footerItem} onClick={handleBranch}> + <Share1Icon /> + </div> + </Tooltip> + )} + {onDelete && messageId && ( + <Tooltip content="Delete message"> + <div + className={`${styles.footerItem} ${styles.footerItemDanger}`} + onClick={handleDelete} + > + <TrashIcon /> + </div> + </Tooltip> + )} + + {/* Token/cost info - styled like tool card icons */} + {hasUsageInfo && ( + <HoverCard.Root> + <HoverCard.Trigger> + <Flex align="center" gap="2"> + {contextTokens > 0 && ( + <div 
className={styles.footerItem}> + <BarChartIcon /> + <span>{formatNumberToFixed(contextTokens)}</span> + </div> + )} + {showCoins && ( + <div className={styles.footerItem}> + <Coin /> + <span>{Math.round(totalCoins)}</span> + </div> + )} + {hasUsd && ( + <div className={styles.footerItem}> + <span>{formatUsd(meteringUsd.total_usd)}</span> + </div> + )} + </Flex> + </HoverCard.Trigger> + <HoverCard.Content size="1" maxWidth="300px"> + <Flex direction="column" gap="2"> + <Text size="2" weight="bold" mb="1"> + This Message + </Text> + + {usage && ( + <> + <TokenDisplay label="Context size" value={contextTokens} /> + <TokenDisplay label="Output tokens" value={outputTokens} /> + {usage.completion_tokens_details?.reasoning_tokens != + null && + usage.completion_tokens_details.reasoning_tokens > 0 && ( + <TokenDisplay + label="Reasoning tokens" + value={ + usage.completion_tokens_details.reasoning_tokens + } + /> + )} + </> + )} + + {showCoins && ( + <> + <Box + my="2" + style={{ borderTop: "1px solid var(--gray-a6)" }} + /> + <Flex align="center" justify="between" width="100%" mb="1"> + <Text size="2" weight="bold"> + Cost + </Text> + <Text size="2"> + <Flex align="center" gap="2"> + {Math.round(totalCoins)}{" "} + <Coin width="14px" height="14px" /> + </Flex> + </Text> + </Flex> + {metering_coins_prompt > 0 && ( + <CoinDisplay + label="Prompt" + value={metering_coins_prompt} + /> + )} + {metering_coins_generated > 0 && ( + <CoinDisplay + label="Completion" + value={metering_coins_generated} + /> + )} + {metering_coins_cache_read > 0 && ( + <CoinDisplay + label="Cache read" + value={metering_coins_cache_read} + /> + )} + {metering_coins_cache_creation > 0 && ( + <CoinDisplay + label="Cache creation" + value={metering_coins_cache_creation} + /> + )} + </> + )} + + {hasUsd && ( + <> + <Box + my="2" + style={{ borderTop: "1px solid var(--gray-a6)" }} + /> + <Flex align="center" justify="between" width="100%" mb="1"> + <Text size="2" weight="bold"> + Cost + </Text> + <Text size="2">{formatUsd(meteringUsd.total_usd)}</Text> + </Flex> + <UsdDisplay label="Prompt" value={meteringUsd.prompt_usd} /> + <UsdDisplay + label="Completion" + value={meteringUsd.generated_usd} + /> + {meteringUsd.cache_read_usd !== undefined && + meteringUsd.cache_read_usd > 0 && ( + <UsdDisplay + label="Cache read" + value={meteringUsd.cache_read_usd} + /> + )} + {meteringUsd.cache_creation_usd !== undefined && + meteringUsd.cache_creation_usd > 0 && ( + <UsdDisplay + label="Cache creation" + value={meteringUsd.cache_creation_usd} + /> + )} + </> + )} + </Flex> + </HoverCard.Content> + </HoverCard.Root> + )} + </div> + </div> + ); +}; + +// Wrapper component to enable CSS hover on parent +export const MessageWrapper: React.FC<{ children: React.ReactNode }> = ({ + children, +}) => { + return <div className={styles.messageWrapper}>{children}</div>; +}; diff --git a/refact-agent/gui/src/components/ChatContent/MessageUsageInfo.tsx b/refact-agent/gui/src/components/ChatContent/MessageUsageInfo.tsx index 960836741..5a87c5c0b 100644 --- a/refact-agent/gui/src/components/ChatContent/MessageUsageInfo.tsx +++ b/refact-agent/gui/src/components/ChatContent/MessageUsageInfo.tsx @@ -3,6 +3,7 @@ import { Box, Card, Flex, Text, HoverCard } from "@radix-ui/themes"; import { Usage } from "../../services/refact"; import { formatNumberToFixed } from "../../utils/formatNumberToFixed"; import { calculateUsageInputTokens } from "../../utils/calculateUsageInputTokens"; +import { formatUsd } from "../../utils/getMetering"; import { Coin } from 
"../../images"; type MessageUsageInfoProps = { @@ -11,7 +12,6 @@ type MessageUsageInfoProps = { metering_coins_generated?: number; metering_coins_cache_creation?: number; metering_coins_cache_read?: number; - topOffset?: string; }; const TokenDisplay: React.FC<{ label: string; value: number }> = ({ @@ -42,13 +42,24 @@ const CoinDisplay: React.FC<{ label: string; value: number }> = ({ </Flex> ); +const UsdDisplay: React.FC<{ label: string; value: number | undefined }> = ({ + label, + value, +}) => ( + <Flex align="center" justify="between" width="100%" gap="4"> + <Text size="1" weight="bold"> + {label} + </Text> + <Text size="1">{formatUsd(value)}</Text> + </Flex> +); + export const MessageUsageInfo: React.FC<MessageUsageInfoProps> = ({ usage, metering_coins_prompt = 0, metering_coins_generated = 0, metering_coins_cache_creation = 0, metering_coins_cache_read = 0, - topOffset = "0", }) => { const outputTokens = useMemo(() => { return calculateUsageInputTokens({ @@ -71,18 +82,29 @@ export const MessageUsageInfo: React.FC<MessageUsageInfoProps> = ({ metering_coins_cache_read, ]); - const contextTokens = usage?.prompt_tokens ?? 0; + // Context tokens includes prompt + cache tokens for accurate context size + const contextTokens = useMemo(() => { + return calculateUsageInputTokens({ + usage, + keys: [ + "prompt_tokens", + "cache_creation_input_tokens", + "cache_read_input_tokens", + ], + }); + }, [usage]); + + const cacheReadTokens = usage?.cache_read_input_tokens ?? 0; + const cacheCreationTokens = usage?.cache_creation_input_tokens ?? 0; - if (!usage && totalCoins === 0) return null; + const meteringUsd = usage?.metering_usd; + const hasUsd = meteringUsd !== undefined && meteringUsd.total_usd > 0; + const showCoins = !hasUsd && totalCoins > 0; + + if (!usage && !showCoins && !hasUsd) return null; return ( - <Box - style={{ - position: "absolute", - right: "var(--space-3)", - top: topOffset, - }} - > + <Flex justify="end" mt="2"> <HoverCard.Root> <HoverCard.Trigger> <Card @@ -92,9 +114,26 @@ export const MessageUsageInfo: React.FC<MessageUsageInfoProps> = ({ cursor: "pointer", }} > - <Flex align="center" gap="1"> - <Text size="1">{Math.round(totalCoins)}</Text> - <Coin width="10px" height="10px" /> + <Flex align="center" gap="3"> + {contextTokens > 0 && ( + <Flex align="center" gap="1"> + <Text size="1" color="gray"> + ctx: + </Text> + <Text size="1">{formatNumberToFixed(contextTokens)}</Text> + </Flex> + )} + {showCoins && ( + <Flex align="center" gap="1"> + <Text size="1">{Math.round(totalCoins)}</Text> + <Coin width="10px" height="10px" /> + </Flex> + )} + {hasUsd && ( + <Flex align="center" gap="1"> + <Text size="1">{formatUsd(meteringUsd.total_usd)}</Text> + </Flex> + )} </Flex> </Card> </HoverCard.Trigger> @@ -107,6 +146,15 @@ export const MessageUsageInfo: React.FC<MessageUsageInfoProps> = ({ {usage && ( <> <TokenDisplay label="Context size" value={contextTokens} /> + {cacheReadTokens > 0 && ( + <TokenDisplay label="Cache read" value={cacheReadTokens} /> + )} + {cacheCreationTokens > 0 && ( + <TokenDisplay + label="Cache creation" + value={cacheCreationTokens} + /> + )} <TokenDisplay label="Output tokens" value={outputTokens} /> {usage.completion_tokens_details?.reasoning_tokens !== null && usage.completion_tokens_details?.reasoning_tokens !== @@ -120,7 +168,7 @@ export const MessageUsageInfo: React.FC<MessageUsageInfoProps> = ({ </> )} - {totalCoins > 0 && ( + {showCoins && ( <> <Box my="2" style={{ borderTop: "1px solid var(--gray-a6)" }} /> <Flex align="center" justify="between" 
width="100%" mb="1"> @@ -157,9 +205,40 @@ export const MessageUsageInfo: React.FC<MessageUsageInfoProps> = ({ )} </> )} + + {hasUsd && ( + <> + <Box my="2" style={{ borderTop: "1px solid var(--gray-a6)" }} /> + <Flex align="center" justify="between" width="100%" mb="1"> + <Text size="2" weight="bold"> + Cost + </Text> + <Text size="2">{formatUsd(meteringUsd.total_usd)}</Text> + </Flex> + <UsdDisplay label="Prompt" value={meteringUsd.prompt_usd} /> + <UsdDisplay + label="Completion" + value={meteringUsd.generated_usd} + /> + {meteringUsd.cache_read_usd !== undefined && + meteringUsd.cache_read_usd > 0 && ( + <UsdDisplay + label="Cache read" + value={meteringUsd.cache_read_usd} + /> + )} + {meteringUsd.cache_creation_usd !== undefined && + meteringUsd.cache_creation_usd > 0 && ( + <UsdDisplay + label="Cache creation" + value={meteringUsd.cache_creation_usd} + /> + )} + </> + )} </Flex> </HoverCard.Content> </HoverCard.Root> - </Box> + </Flex> ); }; diff --git a/refact-agent/gui/src/components/ChatContent/PlainText.tsx b/refact-agent/gui/src/components/ChatContent/PlainText.tsx index b3540f414..6562b7bd5 100644 --- a/refact-agent/gui/src/components/ChatContent/PlainText.tsx +++ b/refact-agent/gui/src/components/ChatContent/PlainText.tsx @@ -1,44 +1,64 @@ import React from "react"; -import { Container, Box, HoverCard } from "@radix-ui/themes"; +import { Container, Box, Flex, Text } from "@radix-ui/themes"; import { Markdown } from "./ContextFiles"; import styles from "./ChatContent.module.css"; -import { Small } from "../Text"; import { ScrollArea } from "../ScrollArea"; -import { FileTextIcon } from "@radix-ui/react-icons"; -import classNames from "classnames"; +import { + FileTextIcon, + ChevronDownIcon, + ChevronRightIcon, +} from "@radix-ui/react-icons"; +import * as Collapsible from "@radix-ui/react-collapsible"; export type PlainTextProps = { children: string; + id?: string; + defaultOpen?: boolean; }; -export const PlainText: React.FC<PlainTextProps> = ({ children }) => { - const [open, setOpen] = React.useState(false); +export const PlainText: React.FC<PlainTextProps> = ({ + children, + id, + defaultOpen = false, +}) => { + const [open, setOpen] = React.useState(defaultOpen); const text = "```text\n" + children + "\n```"; + const preview = + children.slice(0, 100).replace(/\n/g, " ") + + (children.length > 100 ? "..." : ""); + return ( - <Container position="relative"> - <HoverCard.Root onOpenChange={setOpen} open={open}> - <HoverCard.Trigger> - <Box> - <Small - as="span" - className={classNames(styles.file, styles.file_with_icon)} - > - <FileTextIcon width="1em" height="1em" /> - Plain text - </Small> - </Box> - </HoverCard.Trigger> - <ScrollArea scrollbars="both" asChild> - <HoverCard.Content - size="1" - maxHeight="50vh" - maxWidth="90vw" - avoidCollisions + <Container position="relative" data-plain-text-id={id}> + <Collapsible.Root open={open} onOpenChange={setOpen}> + <Collapsible.Trigger asChild> + <Flex + gap="2" + align="center" + py="1" + className={styles.plainTextTrigger} > - <Markdown>{text}</Markdown> - </HoverCard.Content> - </ScrollArea> - </HoverCard.Root> + <FileTextIcon width="14" height="14" /> + <Text size="1" weight="light" style={{ color: "var(--gray-10)" }}> + Plain text + </Text> + <Text size="1" style={{ color: "var(--gray-9)", flex: 1 }} truncate> + {preview} + </Text> + {open ? 
( + <ChevronDownIcon width="14" height="14" /> + ) : ( + <ChevronRightIcon width="14" height="14" /> + )} + </Flex> + </Collapsible.Trigger> + <Collapsible.Content> + <ScrollArea scrollbars="both"> + <Box style={{ maxHeight: "300px" }} pl="4"> + <Markdown>{text}</Markdown> + </Box> + </ScrollArea> + </Collapsible.Content> + </Collapsible.Root> </Container> ); }; diff --git a/refact-agent/gui/src/components/ChatContent/QueuedMessage.tsx b/refact-agent/gui/src/components/ChatContent/QueuedMessage.tsx index 691d24c96..c46c7ae40 100644 --- a/refact-agent/gui/src/components/ChatContent/QueuedMessage.tsx +++ b/refact-agent/gui/src/components/ChatContent/QueuedMessage.tsx @@ -5,53 +5,40 @@ import { ClockIcon, LightningBoltIcon, } from "@radix-ui/react-icons"; -import { useAppDispatch } from "../../hooks"; -import { dequeueUserMessage, QueuedUserMessage } from "../../features/Chat"; +import { QueuedItem } from "../../features/Chat"; +import { useChatActions } from "../../hooks"; import styles from "./ChatContent.module.css"; import classNames from "classnames"; type QueuedMessageProps = { - queuedMessage: QueuedUserMessage; + queuedItem: QueuedItem; position: number; }; -function getMessagePreview(message: QueuedUserMessage["message"]): string { - if (typeof message.content === "string") { - return message.content; - } - // Handle multimodal content - const textPart = message.content.find( - (part) => "type" in part && part.type === "text", - ); - if (textPart && "text" in textPart) { - return textPart.text; - } - return "[Image attachment]"; -} - export const QueuedMessage: React.FC<QueuedMessageProps> = ({ - queuedMessage, + queuedItem, position, }) => { - const dispatch = useAppDispatch(); - const isPriority = queuedMessage.priority; + const { cancelQueued } = useChatActions(); const handleCancel = useCallback(() => { - dispatch(dequeueUserMessage({ queuedId: queuedMessage.id })); - }, [dispatch, queuedMessage.id]); - - const preview = getMessagePreview(queuedMessage.message); + void cancelQueued(queuedItem.client_request_id); + }, [cancelQueued, queuedItem.client_request_id]); return ( <Card className={classNames(styles.queuedMessage, { - [styles.queuedMessagePriority]: isPriority, + [styles.queuedMessagePriority]: queuedItem.priority, })} > - <Flex gap="2" align="start" justify="between"> + <Flex gap="2" align="center" justify="between"> <Flex gap="2" align="center" style={{ flex: 1, minWidth: 0 }}> - <Badge color={isPriority ? "blue" : "amber"} variant="soft" size="1"> - {isPriority ? ( + <Badge + color={queuedItem.priority ? "blue" : "amber"} + variant="soft" + size="1" + > + {queuedItem.priority ? 
( <LightningBoltIcon width={12} height={12} /> ) : ( <ClockIcon width={12} height={12} /> @@ -62,9 +49,9 @@ export const QueuedMessage: React.FC<QueuedMessageProps> = ({ size="2" color="gray" className={styles.queuedMessageText} - title={preview} + title={queuedItem.preview} > - {preview} + {queuedItem.preview || `[${queuedItem.command_type}]`} </Text> </Flex> <IconButton diff --git a/refact-agent/gui/src/components/ChatContent/ReasoningContent/ReasoningContent.module.css b/refact-agent/gui/src/components/ChatContent/ReasoningContent/ReasoningContent.module.css index 0b2ba5c53..563674ea7 100644 --- a/refact-agent/gui/src/components/ChatContent/ReasoningContent/ReasoningContent.module.css +++ b/refact-agent/gui/src/components/ChatContent/ReasoningContent/ReasoningContent.module.css @@ -1,5 +1,92 @@ -.reasoningCallout { - border-left: 3px solid var(--gray-a11); - padding: 12px; - border-radius: 4px; +.card { + overflow: hidden; +} + +.header { + padding: var(--space-1) 0; + cursor: pointer; + user-select: none; + min-height: 24px; + transition: filter 0.15s ease; +} + +.header:hover { + filter: brightness(1.3); +} + +.iconWrapper { + display: flex; + align-items: center; + justify-content: center; + width: 20px; + min-width: 20px; + height: 16px; + flex-shrink: 0; +} + +.iconWrapper svg { + width: 14px; + height: 14px; +} + +.summary { + color: inherit; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + flex: 1; + min-width: 0; +} + +.contentWrapper { + overflow: hidden; + /* Force GPU layer for JCEF compatibility */ + transform: translateZ(0); + /* CSS-based collapse animation using grid */ + display: grid; + grid-template-rows: 0fr; + opacity: 0; + transition: + grid-template-rows 0.2s ease-out, + opacity 0.15s ease-out; +} + +.contentWrapperOpen { + grid-template-rows: 1fr; + opacity: 1; +} + +.contentInner { + overflow: hidden; +} + +.content { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + padding: var(--space-2) 0 var(--space-1); + padding-left: 28px; + max-height: 400px; + overflow-y: auto; +} + +/* Thinking state - subtle text fade */ +@keyframes subtlePulse { + 0%, + 100% { + opacity: 0.9; + } + 50% { + opacity: 0.6; + } +} + +.thinking .summary { + color: var(--gray-11); + animation: subtlePulse 2s ease-in-out infinite; +} + +/* Respect reduced motion preference */ +@media (prefers-reduced-motion: reduce) { + .thinking .summary { + animation: none; + } } diff --git a/refact-agent/gui/src/components/ChatContent/ReasoningContent/ReasoningContent.tsx b/refact-agent/gui/src/components/ChatContent/ReasoningContent/ReasoningContent.tsx index ee3590412..e01d2efc7 100644 --- a/refact-agent/gui/src/components/ChatContent/ReasoningContent/ReasoningContent.tsx +++ b/refact-agent/gui/src/components/ChatContent/ReasoningContent/ReasoningContent.tsx @@ -1,31 +1,220 @@ -import { Box, Flex, Heading, Text } from "@radix-ui/themes"; -import React from "react"; +import React, { + useState, + useEffect, + useRef, + useCallback, + useMemo, +} from "react"; +import { Flex, Text, Spinner } from "@radix-ui/themes"; +import classNames from "classnames"; +import { LightningBoltIcon } from "@radix-ui/react-icons"; import { Markdown } from "../../Markdown"; +import { useDelayedUnmount } from "../../shared/useDelayedUnmount"; +import { useCollapsibleStore } from "../useStoredOpen"; import styles from "./ReasoningContent.module.css"; +// Bold titles like "**Some Title**" often appear glued to the end of the +// previous sentence in reasoning summaries. 
Insert a paragraph break before +// them so Markdown renders each title as a separate block. +// The regex matches "**" preceded by a non-whitespace char where the first +// letter after "**" is uppercase — so inline bold like "**sorted**" is left +// alone. +function fixReasoningParagraphs(text: string): string { + return text.replace(/(\S)(\*\*[A-Z])/g, "$1\n\n$2"); +} + type ReasoningContentProps = { reasoningContent: string; onCopyClick: (text: string) => void; + isStreaming?: boolean; + hasMessageContent?: boolean; + stateKey?: string; }; +function formatDuration(seconds: number): string { + if (seconds < 60) { + return `${Math.round(seconds)} seconds`; + } + const minutes = Math.floor(seconds / 60); + const remainingSeconds = Math.round(seconds % 60); + if (remainingSeconds === 0) { + return `${minutes} minute${minutes > 1 ? "s" : ""}`; + } + return `${minutes}m ${remainingSeconds}s`; +} + export const ReasoningContent: React.FC<ReasoningContentProps> = ({ reasoningContent, onCopyClick, + isStreaming = false, + hasMessageContent = false, + stateKey, }) => { + const store = useCollapsibleStore(); + const storedOpen = stateKey && store ? store.get(stateKey) : undefined; + const hadStoredStateRef = useRef(storedOpen !== undefined); + + const [isOpen, setIsOpen] = useState(storedOpen ?? true); + const [thinkingDuration, setThinkingDuration] = useState<number | null>(null); + const startTimeRef = useRef<number | null>(null); + const userToggledRef = useRef(false); + const wasThinkingRef = useRef(false); + const durationCapturedRef = useRef(false); + const contentRef = useRef<HTMLDivElement>(null); + const userScrolledRef = useRef(false); + + useEffect(() => { + if (stateKey && store) store.set(stateKey, isOpen); + }, [stateKey, store, isOpen]); + + // Track thinking duration - stop when message content starts appearing + useEffect(() => { + const isActivelyThinking = + isStreaming && !!reasoningContent && !hasMessageContent; + + if (isActivelyThinking) { + // Started thinking + if (startTimeRef.current === null) { + startTimeRef.current = Date.now(); + } + wasThinkingRef.current = true; + } else if ( + wasThinkingRef.current && + startTimeRef.current !== null && + !durationCapturedRef.current + ) { + // Thinking finished (message content started or streaming ended) + const duration = (Date.now() - startTimeRef.current) / 1000; + setThinkingDuration(duration); + durationCapturedRef.current = true; + } + }, [isStreaming, reasoningContent, hasMessageContent]); + + // Auto-collapse after entire message finishes streaming + useEffect(() => { + if (!isStreaming && wasThinkingRef.current && !userToggledRef.current) { + const timer = setTimeout(() => { + setIsOpen(false); + }, 500); + return () => clearTimeout(timer); + } + }, [isStreaming]); + + // Handle initial mount for already-completed thinking blocks + useEffect(() => { + if (hadStoredStateRef.current) return; + if ( + !isStreaming && + reasoningContent && + thinkingDuration === null && + startTimeRef.current === null + ) { + // This is a historical thinking block (page reload or switching chats) + // Start collapsed since we don't have timing info + setIsOpen(false); + } + }, [isStreaming, reasoningContent, thinkingDuration]); + + // Auto-scroll to bottom while streaming + useEffect(() => { + if ( + isStreaming && + isOpen && + contentRef.current && + !userScrolledRef.current + ) { + const el = contentRef.current; + if (el.scrollTop + el.clientHeight + 20 < el.scrollHeight) { + el.scrollTop = el.scrollHeight; + } + } + }, 
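A quick worked example of the paragraph-break heuristic that `fixReasoningParagraphs` (added above) applies to reasoning summaries; the sample strings are invented:

```ts
// Same regex as fixReasoningParagraphs(): break before a bold run that starts
// with an uppercase letter and is glued to non-whitespace, but leave inline
// bold (lowercase after "**") untouched.
const fix = (text: string): string =>
  text.replace(/(\S)(\*\*[A-Z])/g, "$1\n\n$2");

console.log(fix("Collected the files.**Next Step** run the tests"));
// -> "Collected the files.\n\n**Next Step** run the tests"

console.log(fix("keep the **sorted** list as is"));
// -> unchanged: the character after "**" is lowercase
```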
[reasoningContent, isStreaming, isOpen]); + + // Reset user scroll flag when streaming starts + useEffect(() => { + if (isStreaming) { + userScrolledRef.current = false; + } + }, [isStreaming]); + + // Handle user scroll to disable auto-scroll + const handleScroll = useCallback(() => { + if (contentRef.current && isStreaming) { + const { scrollTop, scrollHeight, clientHeight } = contentRef.current; + // If user scrolled up (not at bottom), disable auto-scroll + const isAtBottom = scrollHeight - scrollTop - clientHeight < 20; + if (!isAtBottom) { + userScrolledRef.current = true; + } + } + }, [isStreaming]); + + const handleToggle = useCallback(() => { + userToggledRef.current = true; + setIsOpen((prev) => !prev); + }, []); + + const isActivelyThinking = + isStreaming && !!reasoningContent && !hasMessageContent; + const summaryText = isActivelyThinking + ? "Thinking..." + : thinkingDuration !== null + ? `Thought for ${formatDuration(thinkingDuration)}` + : "Thought"; + + const formattedContent = useMemo( + () => fixReasoningParagraphs(reasoningContent), + [reasoningContent], + ); + + const { shouldRender, isAnimatingOpen } = useDelayedUnmount(isOpen, 200); + return ( - <Box py="4" style={{ paddingRight: "50px" }}> - <Flex direction="column" gap="2" className={styles.reasoningCallout}> - <Heading as="h3" color="gray" size="2"> - Model Reasoning - </Heading> - <Text size="2" color="gray"> - <Markdown canHaveInteractiveElements={true} onCopyClick={onCopyClick}> - {reasoningContent} - </Markdown> + <div className={styles.card}> + <Flex + className={classNames( + styles.header, + isActivelyThinking && styles.thinking, + )} + align="center" + gap="2" + onClick={handleToggle} + > + <span className={styles.iconWrapper}> + {isActivelyThinking ? <Spinner size="1" /> : <LightningBoltIcon />} + </span> + <Text size="1" className={styles.summary}> + {summaryText} </Text> </Flex> - </Box> + + {shouldRender && ( + <div + className={classNames( + styles.contentWrapper, + isAnimatingOpen && styles.contentWrapperOpen, + )} + > + <div className={styles.contentInner}> + <div + ref={contentRef} + className={styles.content} + onScroll={handleScroll} + > + <Text size="1" color="gray" as="div"> + <Markdown + canHaveInteractiveElements={true} + onCopyClick={onCopyClick} + > + {formattedContent} + </Markdown> + </Text> + </div> + </div> + </div> + )} + </div> ); }; diff --git a/refact-agent/gui/src/components/ChatContent/ResendButton.tsx b/refact-agent/gui/src/components/ChatContent/ResendButton.tsx index c0f1407ed..e8b49cb3d 100644 --- a/refact-agent/gui/src/components/ChatContent/ResendButton.tsx +++ b/refact-agent/gui/src/components/ChatContent/ResendButton.tsx @@ -1,24 +1,21 @@ import React from "react"; -import { IconButton, Tooltip } from "@radix-ui/themes"; -import { useAppSelector } from "../../hooks"; +import { IconButton, HoverCard, Text } from "@radix-ui/themes"; +import { useAppSelector, useChatActions } from "../../hooks"; import { selectIsStreaming, selectIsWaiting, selectMessages, } from "../../features/Chat"; -import { useSendChatRequest } from "../../hooks/useSendChatRequest"; function useResendMessages() { const messages = useAppSelector(selectMessages); const isStreaming = useAppSelector(selectIsStreaming); const isWaiting = useAppSelector(selectIsWaiting); - const { retry } = useSendChatRequest(); + const { regenerate } = useChatActions(); const handleResend = React.useCallback(() => { - if (messages.length > 0) { - retry(messages); - } - }, [messages, retry]); + void regenerate(); + }, 
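`useDelayedUnmount` is only referenced in this diff, not defined, so the following is a sketch of what a hook with this call shape could look like — an assumption, not the project's actual implementation. The idea it illustrates: keep the panel mounted for `delayMs` after closing so the CSS transition can finish, and flip `isAnimatingOpen` one frame after mounting so the open transition actually runs.

```ts
import { useEffect, useState } from "react";

// Hypothetical implementation matching the usage
//   const { shouldRender, isAnimatingOpen } = useDelayedUnmount(isOpen, 200);
export function useDelayedUnmount(isOpen: boolean, delayMs: number) {
  const [shouldRender, setShouldRender] = useState(isOpen);
  const [isAnimatingOpen, setIsAnimatingOpen] = useState(isOpen);

  useEffect(() => {
    if (isOpen) {
      setShouldRender(true);
      // Let the collapsed styles paint first, then start the open transition.
      const raf = requestAnimationFrame(() => setIsAnimatingOpen(true));
      return () => cancelAnimationFrame(raf);
    }
    // Start the close transition now; unmount after it has had time to finish.
    setIsAnimatingOpen(false);
    const timer = setTimeout(() => setShouldRender(false), delayMs);
    return () => clearTimeout(timer);
  }, [isOpen, delayMs]);

  return { shouldRender, isAnimatingOpen };
}
```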
[regenerate]); const shouldShow = React.useMemo(() => { if (messages.length === 0) return false; @@ -36,19 +33,31 @@ export const ResendButton = () => { if (!shouldShow) return null; return ( - <Tooltip content="Resend last messages"> - <IconButton variant="ghost" onClick={handleResend} size="2"> - <ResendIcon /> - </IconButton> - </Tooltip> + <HoverCard.Root> + <HoverCard.Trigger> + <IconButton + type="button" + variant="ghost" + onClick={handleResend} + size="1" + > + <ResendIcon /> + </IconButton> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top"> + <Text as="p" size="2"> + Resend last messages + </Text> + </HoverCard.Content> + </HoverCard.Root> ); }; const ResendIcon: React.FC = () => { return ( <svg - height="20" - width="20" + height="15" + width="15" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg" diff --git a/refact-agent/gui/src/components/ChatContent/ServerContentBlocks.tsx b/refact-agent/gui/src/components/ChatContent/ServerContentBlocks.tsx new file mode 100644 index 000000000..ebb4ba172 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ServerContentBlocks.tsx @@ -0,0 +1,203 @@ +import React, { useMemo, useState } from "react"; +import { MagnifyingGlassIcon } from "@radix-ui/react-icons"; +import { Box, Flex, Link, Text } from "@radix-ui/themes"; +import { ToolCard } from "./ToolCard"; +import styles from "./ToolCard/OpenAIResponsesTool.module.css"; +import scrollbarStyles from "../shared/scrollbar.module.css"; + +type ServerToolUse = { + type: "server_tool_use"; + id: string; + name: string; + input?: Record<string, unknown>; +}; + +type WebSearchResult = { + type: "web_search_result"; + title?: string; + url?: string; + encrypted_content?: string; + page_age?: string | null; +}; + +type WebSearchToolResult = { + type: "web_search_tool_result"; + tool_use_id: string; + content?: WebSearchResult[]; +}; + +type ServerBlock = + | ServerToolUse + | WebSearchToolResult + | Record<string, unknown>; + +function isServerToolUse(block: ServerBlock): block is ServerToolUse { + return "type" in block && block.type === "server_tool_use"; +} + +function isWebSearchToolResult( + block: ServerBlock, +): block is WebSearchToolResult { + return "type" in block && block.type === "web_search_tool_result"; +} + +function isSafeHttpUrl(url: string): boolean { + try { + const parsed = new URL(url); + return parsed.protocol === "http:" || parsed.protocol === "https:"; + } catch { + return false; + } +} + +type WebSearchGroup = { + toolUse: ServerToolUse; + result?: WebSearchToolResult; +}; + +function groupServerBlocks(blocks: unknown[]): { + webSearchGroups: WebSearchGroup[]; + ungrouped: unknown[]; +} { + const typedBlocks = blocks as ServerBlock[]; + const webSearchGroups: WebSearchGroup[] = []; + const grouped = new Set<number>(); + + for (let i = 0; i < typedBlocks.length; i++) { + const block = typedBlocks[i]; + if (isServerToolUse(block) && block.name === "web_search") { + const resultIdx = typedBlocks.findIndex( + (b, j) => + j > i && isWebSearchToolResult(b) && b.tool_use_id === block.id, + ); + const group: WebSearchGroup = { toolUse: block }; + grouped.add(i); + if (resultIdx >= 0) { + group.result = typedBlocks[resultIdx] as WebSearchToolResult; + grouped.add(resultIdx); + } + webSearchGroups.push(group); + } + } + + const ungrouped = typedBlocks.filter((_, i) => !grouped.has(i)); + return { webSearchGroups, ungrouped }; +} + +const WebSearchBlock: React.FC<{ group: WebSearchGroup }> = ({ group }) => { + const [isOpen, setIsOpen] = 
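To make the pairing performed by `groupServerBlocks` (above) concrete, here is a small illustration; the block ids, query, and URL are made up:

```ts
// Hypothetical input: a web_search call, its result, and an unrecognized block.
const blocks: unknown[] = [
  {
    type: "server_tool_use",
    id: "srvtoolu_01", // invented id
    name: "web_search",
    input: { query: "radix themes hovercard" },
  },
  {
    type: "web_search_tool_result",
    tool_use_id: "srvtoolu_01", // pairs with the block above
    content: [
      { type: "web_search_result", title: "HoverCard docs", url: "https://example.com" },
    ],
  },
  { type: "some_other_server_block", tool_use_id: "srvtoolu_02" },
];

// groupServerBlocks(blocks) pairs the first two into one WebSearchGroup
// (rendered as a collapsible ToolCard) and leaves the third in `ungrouped`,
// which falls back to the raw JSON <pre> dump at the bottom of the component.
```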
useState(false); + + const query = + typeof group.toolUse.input?.query === "string" + ? group.toolUse.input.query + : undefined; + + const results = useMemo(() => { + if (!group.result?.content) return []; + return group.result.content.slice(0, 50); + }, [group.result]); + + const summary = query ? ( + <> + Web Search: <span className={styles.inlineCode}>{query}</span> + </> + ) : ( + "Web Search" + ); + + return ( + <ToolCard + icon={<MagnifyingGlassIcon />} + summary={summary} + status="success" + isOpen={isOpen} + onToggle={() => setIsOpen((prev) => !prev)} + > + {results.length > 0 && ( + <Box> + <Text size="1" color="gray"> + Results ({results.length}) + </Text> + <Box className={styles.resultList}> + {results.map((r, idx) => { + const title = r.title ?? "(no title)"; + const url = r.url ?? ""; + const safeUrl = url && isSafeHttpUrl(url) ? url : ""; + return ( + <Box key={idx} className={styles.resultItem}> + <Flex direction="column" gap="1"> + {safeUrl ? ( + <Link + href={safeUrl} + target="_blank" + rel="noopener noreferrer" + size="2" + > + {title} + </Link> + ) : ( + <Text size="2" weight="medium"> + {title} + </Text> + )} + {safeUrl && ( + <Text size="1" color="gray" className={styles.inlineCode}> + {safeUrl} + </Text> + )} + </Flex> + </Box> + ); + })} + </Box> + </Box> + )} + {results.length === 0 && !group.result && ( + <Text size="1" color="gray"> + Waiting for results… + </Text> + )} + </ToolCard> + ); +}; + +type ServerContentBlocksProps = { + blocks: unknown[]; +}; + +export const ServerContentBlocks: React.FC<ServerContentBlocksProps> = ({ + blocks, +}) => { + const { webSearchGroups, ungrouped } = useMemo( + () => groupServerBlocks(blocks), + [blocks], + ); + + if (webSearchGroups.length === 0 && ungrouped.length === 0) return null; + + return ( + <Box> + {webSearchGroups.map((group) => ( + <WebSearchBlock key={group.toolUse.id} group={group} /> + ))} + {ungrouped.length > 0 && ( + <Box mt="2"> + <Text size="1" color="gray"> + Server blocks ({ungrouped.length}) + </Text> + <pre + style={{ + fontSize: "var(--font-size-1)", + color: "var(--gray-11)", + overflowX: "auto", + maxHeight: 200, + }} + className={scrollbarStyles.scrollbarThin} + > + {JSON.stringify(ungrouped, null, 2)} + </pre> + </Box> + )} + </Box> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/SystemPrompt.module.css b/refact-agent/gui/src/components/ChatContent/SystemPrompt.module.css new file mode 100644 index 000000000..d6f1bc3ba --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/SystemPrompt.module.css @@ -0,0 +1,66 @@ +.card { + overflow: hidden; +} + +.header { + padding: var(--space-1) 0; + cursor: pointer; + user-select: none; + min-height: 24px; + transition: filter 0.15s ease; +} + +.header:hover { + filter: brightness(1.3); +} + +.icon { + display: flex; + align-items: center; + justify-content: center; + width: 20px; + min-width: 20px; + height: 16px; + flex-shrink: 0; +} + +.icon svg { + width: 14px; + height: 14px; +} + +.summary { + color: inherit; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.contentWrapper { + overflow: hidden; + /* Force GPU layer for JCEF compatibility */ + transform: translateZ(0); + /* CSS-based collapse animation using grid */ + display: grid; + grid-template-rows: 0fr; + opacity: 0; + transition: + grid-template-rows 0.2s ease-out, + opacity 0.15s ease-out; +} + +.contentWrapperOpen { + grid-template-rows: 1fr; + opacity: 1; +} + +.contentInner { + overflow: hidden; +} + +.content { + composes: scrollbarThin 
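The `.contentWrapper` rules above animate `grid-template-rows` between `0fr` and `1fr` so a panel can collapse to zero height without measuring its content in JS; the same pattern is reused by the other new collapsible components' CSS modules. A minimal standalone sketch of the idea, with inline styles standing in for the CSS module classes:

```tsx
import React, { useState } from "react";

// Same collapse idea as .contentWrapper/.contentWrapperOpen: a one-row grid
// whose single row animates 0fr <-> 1fr; the inner overflow:hidden wrapper is
// what actually clips the content at 0fr.
export const CollapseSketch: React.FC<{ children: React.ReactNode }> = ({
  children,
}) => {
  const [open, setOpen] = useState(false);
  return (
    <div>
      <button onClick={() => setOpen((v) => !v)}>{open ? "Hide" : "Show"}</button>
      <div
        style={{
          display: "grid",
          gridTemplateRows: open ? "1fr" : "0fr",
          opacity: open ? 1 : 0,
          overflow: "hidden",
          transition: "grid-template-rows 0.2s ease-out, opacity 0.15s ease-out",
        }}
      >
        <div style={{ overflow: "hidden" }}>{children}</div>
      </div>
    </div>
  );
};
```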
from "../shared/scrollbar.module.css"; + padding: var(--space-2) 0 var(--space-1); + max-height: 400px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/SystemPrompt.tsx b/refact-agent/gui/src/components/ChatContent/SystemPrompt.tsx new file mode 100644 index 000000000..3d545b207 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/SystemPrompt.tsx @@ -0,0 +1,53 @@ +import React, { useState, useCallback } from "react"; +import { Box, Text, Flex } from "@radix-ui/themes"; +import classNames from "classnames"; +import { ReaderIcon } from "@radix-ui/react-icons"; +import { Markdown } from "../Markdown"; +import { useDelayedUnmount } from "../shared/useDelayedUnmount"; +import styles from "./SystemPrompt.module.css"; + +export const SystemPrompt: React.FC<{ + content: string; +}> = ({ content }) => { + const [isOpen, setIsOpen] = useState(false); + const { shouldRender, isAnimatingOpen } = useDelayedUnmount(isOpen, 200); + + const handleToggle = useCallback(() => { + setIsOpen((prev) => !prev); + }, []); + + if (!content.trim()) return null; + + return ( + <div className={styles.card}> + <Flex + className={styles.header} + align="center" + gap="2" + onClick={handleToggle} + > + <span className={styles.icon}> + <ReaderIcon /> + </span> + <Text size="1" className={styles.summary}> + System prompt + </Text> + </Flex> + + {shouldRender && ( + <div + className={classNames( + styles.contentWrapper, + isAnimatingOpen && styles.contentWrapperOpen, + )} + > + <div className={styles.contentInner}> + <Box className={styles.content}> + <Markdown>{content}</Markdown> + </Box> + </div> + </div> + )} + </div> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/AskQuestionsTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/AskQuestionsTool.module.css new file mode 100644 index 000000000..5bea74815 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/AskQuestionsTool.module.css @@ -0,0 +1,7 @@ +.content { + padding: var(--space-2) 0; +} + +.questionItem { + padding: var(--space-2) 0; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/AskQuestionsTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/AskQuestionsTool.tsx new file mode 100644 index 000000000..09af867a9 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/AskQuestionsTool.tsx @@ -0,0 +1,421 @@ +import React, { + useMemo, + useState, + useCallback, + useEffect, + useRef, +} from "react"; +import { + QuestionMarkCircledIcon, + CheckCircledIcon, +} from "@radix-ui/react-icons"; +import { + Box, + Flex, + Text, + Button, + TextArea, + RadioGroup, + Checkbox, +} from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { Markdown } from "../../Markdown"; +import { useAppSelector, useChatActions } from "../../../hooks"; +import { + selectToolResultById, + selectMessages, +} from "../../../features/Chat/Thread/selectors"; +import { + ToolCall, + isUserMessage, + isToolMessage, +} from "../../../services/refact/types"; +import styles from "./AskQuestionsTool.module.css"; + +interface QuestionItem { + id: string; + type: "yes_no" | "single_select" | "multi_select" | "free_text"; + text: string; + options?: string[]; +} + +interface AskQuestionsResult { + type: "ask_questions"; + tool_call_id: string; + questions: QuestionItem[]; +} + +interface AskQuestionsToolProps { + toolCall: ToolCall; +} + +function formatAnswers( + marker: 
string, + questions: QuestionItem[], + answers: Record<string, string | string[]>, + additional: string, +): string { + const lines = [marker]; + + for (const q of questions) { + const answer = answers[q.id]; + lines.push(`> [${q.id}] ${q.text}`); + if (Array.isArray(answer)) { + lines.push(answer.length > 0 ? answer.join(", ") : "(no selection)"); + } else if (answer && answer.includes("\n")) { + lines.push("```"); + lines.push(answer); + lines.push("```"); + } else { + lines.push(answer || "(no answer)"); + } + lines.push(""); + } + + if (additional.trim()) { + lines.push("> [__additional__] Additional comments"); + if (additional.includes("\n")) { + lines.push("```"); + lines.push(additional.trim()); + lines.push("```"); + } else { + lines.push(additional.trim()); + } + } + + return lines.join("\n").trim(); +} + +function parseAnswersFromMessage( + content: string, + questions: QuestionItem[], +): Record<string, string> | null { + const result: Record<string, string> = {}; + const idSet = new Set(questions.map((q) => q.id)); + idSet.add("__additional__"); + + const regex = /^> \[([^\]]+)\]/gm; + let match; + const positions: { id: string; start: number }[] = []; + + while ((match = regex.exec(content)) !== null) { + if (idSet.has(match[1])) { + positions.push({ id: match[1], start: match.index }); + } + } + + for (let i = 0; i < positions.length; i++) { + const { id, start } = positions[i]; + const lineEnd = content.indexOf("\n", start); + if (lineEnd === -1) continue; // Guard against missing newline + const answerStart = lineEnd + 1; + const answerEnd = + i + 1 < positions.length ? positions[i + 1].start : content.length; + + let answer = content.slice(answerStart, answerEnd).trim(); + if (answer.startsWith("```") && answer.includes("```", 3)) { + const codeStart = answer.indexOf("\n") + 1; + const codeEnd = answer.lastIndexOf("```"); + answer = answer.slice(codeStart, codeEnd).trim(); + } + if (answer) { + result[id] = answer; + } + } + + return Object.keys(result).length > 0 ? result : null; +} + +const QuestionWidget: React.FC<{ + question: QuestionItem; + value: string | string[]; + onChange: (val: string | string[]) => void; +}> = ({ question, value, onChange }) => { + switch (question.type) { + case "yes_no": + return ( + <Box className={styles.questionItem}> + <Box mb="2"> + <Markdown>{question.text}</Markdown> + </Box> + <RadioGroup.Root + value={typeof value === "string" ? value : ""} + onValueChange={onChange} + > + <Flex gap="3"> + <RadioGroup.Item value="Yes">Yes</RadioGroup.Item> + <RadioGroup.Item value="No">No</RadioGroup.Item> + </Flex> + </RadioGroup.Root> + </Box> + ); + + case "single_select": + return ( + <Box className={styles.questionItem}> + <Box mb="2"> + <Markdown>{question.text}</Markdown> + </Box> + <RadioGroup.Root + value={typeof value === "string" ? value : ""} + onValueChange={onChange} + > + <Flex direction="column" gap="2"> + {question.options?.map((opt) => ( + <RadioGroup.Item key={opt} value={opt}> + {opt} + </RadioGroup.Item> + ))} + </Flex> + </RadioGroup.Root> + </Box> + ); + + case "multi_select": + return ( + <Box className={styles.questionItem}> + <Box mb="2"> + <Markdown>{question.text}</Markdown> + </Box> + <Flex direction="column" gap="2"> + {question.options?.map((opt) => ( + <Flex key={opt} align="center" gap="2"> + <Checkbox + checked={Array.isArray(value) && value.includes(opt)} + onCheckedChange={(checked) => { + const current = Array.isArray(value) ? 
value : []; + if (checked === true) { + onChange([...current, opt]); + } else { + onChange(current.filter((v) => v !== opt)); + } + }} + /> + <Text size="2">{opt}</Text> + </Flex> + ))} + </Flex> + </Box> + ); + + case "free_text": + return ( + <Box className={styles.questionItem}> + <Box mb="2"> + <Markdown>{question.text}</Markdown> + </Box> + <TextArea + value={typeof value === "string" ? value : ""} + onChange={(e) => onChange(e.target.value)} + placeholder="Type your answer..." + /> + </Box> + ); + + default: + return null; + } +}; + +export const AskQuestionsTool: React.FC<AskQuestionsToolProps> = ({ + toolCall, +}) => { + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle, setIsOpen] = useStoredOpen(storeKey, true); + const [answers, setAnswers] = useState<Record<string, string | string[]>>({}); + const [additionalText, setAdditionalText] = useState(""); + const hasCollapsedManualRef = useRef(false); + + const { submit } = useChatActions(); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const messages = useAppSelector(selectMessages); + + const data = useMemo((): AskQuestionsResult | null => { + if (!maybeResult || typeof maybeResult.content !== "string") return null; + try { + return JSON.parse(maybeResult.content) as AskQuestionsResult; + } catch { + return null; + } + }, [maybeResult]); + + const marker = `[QA:${toolCall.id}]`; + + const nextUserMessage = useMemo(() => { + if (!maybeResult) return null; + + let foundToolResult = false; + for (const msg of messages) { + if (isToolMessage(msg) && msg.tool_call_id === toolCall.id) { + foundToolResult = true; + continue; + } + if (foundToolResult && isUserMessage(msg)) { + return msg; + } + } + return null; + }, [messages, maybeResult, toolCall.id]); + + const getContentText = useCallback((content: unknown): string => { + if (typeof content === "string") return content; + if (!Array.isArray(content)) return ""; + for (const item of content) { + if (typeof item === "object" && item !== null) { + const obj = item as Record<string, unknown>; + if (obj.type === "text" && typeof obj.text === "string") { + return obj.text; + } + if (obj.m_type === "text" && typeof obj.m_content === "string") { + return obj.m_content; + } + } + } + return ""; + }, []); + + const answeredViaForm = useMemo(() => { + if (!nextUserMessage) return false; + const content = getContentText(nextUserMessage.content); + return content.startsWith(marker); + }, [nextUserMessage, marker, getContentText]); + + const parsedAnswers = useMemo(() => { + if (!answeredViaForm || !nextUserMessage || !data) return null; + const content = getContentText(nextUserMessage.content); + return parseAnswersFromMessage(content, data.questions); + }, [answeredViaForm, nextUserMessage, data, getContentText]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if (maybeResult.tool_failed) return "error"; + if (!nextUserMessage) return "running"; + return "success"; + }, [maybeResult, nextUserMessage]); + + useEffect(() => { + if (nextUserMessage && !answeredViaForm && !hasCollapsedManualRef.current) { + hasCollapsedManualRef.current = true; + setIsOpen(false); + } + }, [nextUserMessage, answeredViaForm, setIsOpen]); + + const handleSubmit = useCallback(() => { + if (!data) return; + const formatted = formatAnswers( + marker, + data.questions, + answers, + additionalText, + ); + void submit(formatted); + }, [data, marker, answers, additionalText, submit]); + + 
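For reference, a simplified, self-contained sketch of the serialization round-trip between `formatAnswers` and `parseAnswersFromMessage` defined above: answers are written under `> [question-id]` headers and recovered by scanning those headers. The marker and ids below are invented, and this sketch skips the fenced multi-line answer handling the real parser has:

```ts
const serialized = [
  "[QA:toolu_example]", // marker — prefixes the user message
  "> [q1] Enable the cache?",
  "Yes",
  "",
  "> [q2] Which provider?",
  "Anthropic",
].join("\n");

function parseAnswers(content: string, ids: string[]): Record<string, string> {
  const idSet = new Set(ids);
  const out: Record<string, string> = {};
  const headers = Array.from(content.matchAll(/^> \[([^\]]+)\]/gm))
    .filter((m) => idSet.has(m[1]))
    .map((m) => ({ id: m[1], start: m.index ?? 0 }));
  headers.forEach(({ id, start }, i) => {
    const lineEnd = content.indexOf("\n", start);
    if (lineEnd === -1) return;
    const end = i + 1 < headers.length ? headers[i + 1].start : content.length;
    const answer = content.slice(lineEnd + 1, end).trim();
    if (answer) out[id] = answer;
  });
  return out;
}

console.log(parseAnswers(serialized, ["q1", "q2"]));
// -> { q1: "Yes", q2: "Anthropic" }
```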
const hasNextMessage = !!nextUserMessage; + + if (!hasNextMessage && data) { + return ( + <ToolCard + icon={<QuestionMarkCircledIcon />} + summary="Questions for you" + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + <Box className={styles.content}> + <Flex direction="column" gap="3"> + {data.questions.map((q) => ( + <QuestionWidget + key={q.id} + question={q} + value={answers[q.id] || (q.type === "multi_select" ? [] : "")} + onChange={(val) => + setAnswers((prev) => ({ ...prev, [q.id]: val })) + } + /> + ))} + + <Box className={styles.questionItem}> + <Text size="1" color="gray" mb="1" as="p"> + Additional comments (optional) + </Text> + <TextArea + value={additionalText} + onChange={(e) => setAdditionalText(e.target.value)} + placeholder="Add any extra context..." + /> + </Box> + + <Button onClick={handleSubmit} size="2"> + Submit Answers + </Button> + </Flex> + </Box> + </ToolCard> + ); + } + + if (answeredViaForm && data && parsedAnswers) { + return ( + <ToolCard + icon={<CheckCircledIcon />} + summary="Questions answered" + status="success" + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + <Box className={styles.content}> + <Flex direction="column" gap="2"> + {data.questions.map((q) => ( + <Box key={q.id}> + <Markdown>{q.text}</Markdown> + <Text color="gray" size="2" ml="2"> + → {parsedAnswers[q.id] || "(no answer)"} + </Text> + </Box> + ))} + {parsedAnswers.__additional__ && ( + <Box mt="2"> + <Text size="2" color="gray" style={{ fontStyle: "italic" }}> + {parsedAnswers.__additional__} + </Text> + </Box> + )} + </Flex> + </Box> + </ToolCard> + ); + } + + return ( + <ToolCard + icon={<QuestionMarkCircledIcon />} + summary="Questions (answered manually)" + status="success" + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {data && ( + <Box className={styles.content}> + <Flex direction="column" gap="1"> + {data.questions.map((q) => ( + <Box key={q.id}> + <Markdown>{`• ${q.text}`}</Markdown> + </Box> + ))} + </Flex> + </Box> + )} + </ToolCard> + ); +}; + +export default AskQuestionsTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/CodeReviewTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/CodeReviewTool.tsx new file mode 100644 index 000000000..a28e2d238 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/CodeReviewTool.tsx @@ -0,0 +1,20 @@ +import React from "react"; +import { MagnifyingGlassIcon } from "@radix-ui/react-icons"; +import { ToolCall } from "../../../services/refact/types"; +import { StreamingToolCard } from "./StreamingToolCard"; + +interface CodeReviewToolProps { + toolCall: ToolCall; +} + +export const CodeReviewTool: React.FC<CodeReviewToolProps> = ({ toolCall }) => { + return ( + <StreamingToolCard + toolCall={toolCall} + icon={<MagnifyingGlassIcon />} + summary="Review code" + /> + ); +}; + +export default CodeReviewTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ContextFileList.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/ContextFileList.module.css new file mode 100644 index 000000000..b2f30dd70 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ContextFileList.module.css @@ -0,0 +1,70 @@ +.list { + margin-top: var(--space-2); + padding-top: var(--space-2); + border-top: 1px solid var(--gray-a3); + padding-left: var(--space-3); +} + +.item { + overflow: hidden; +} + +.header { + padding: var(--space-1) 0; + cursor: pointer; + user-select: none; + min-height: 
24px; + transition: filter 0.15s ease; +} + +.header:hover { + filter: brightness(1.3); +} + +.icon { + width: 12px; + height: 12px; + color: var(--gray-9); + flex-shrink: 0; +} + +.filename { + color: var(--accent-11); + font-family: var(--code-font-family); + font-size: 0.9em; + cursor: pointer; +} + +.filename:hover { + text-decoration: underline; +} + +.contentWrapper { + overflow: hidden; + /* Force GPU layer for JCEF compatibility */ + transform: translateZ(0); + /* CSS-based collapse animation using grid */ + display: grid; + grid-template-rows: 0fr; + opacity: 0; + transition: + grid-template-rows 0.2s ease-out, + opacity 0.15s ease-out; +} + +.contentWrapperOpen { + grid-template-rows: 1fr; + opacity: 1; +} + +.contentInner { + overflow: hidden; +} + +.content { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + padding: var(--space-1) 0; + padding-left: var(--space-3); + max-height: 200px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ContextFileList.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/ContextFileList.tsx new file mode 100644 index 000000000..a3aeeca33 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ContextFileList.tsx @@ -0,0 +1,119 @@ +import React, { useState, useCallback } from "react"; +import { FileTextIcon } from "@radix-ui/react-icons"; +import { Box, Flex, Text } from "@radix-ui/themes"; +import classNames from "classnames"; +import { ChatContextFile } from "../../../services/refact/types"; +import { useEventsBusForIDE } from "../../../hooks"; +import { ShikiCodeBlock } from "../../Markdown"; +import { useDelayedUnmount } from "../../shared/useDelayedUnmount"; +import styles from "./ContextFileList.module.css"; + +function filename(path: string): string { + const parts = path.split("/"); + return parts[parts.length - 1] || path; +} + +function formatFileName( + filePath: string, + line1?: number, + line2?: number, +): string { + const name = filename(filePath); + if (line1 && line2 && line1 !== 0 && line2 !== 0) { + return `${name}:${line1}-${line2}`; + } + return name; +} + +function getExtensionFromName(name: string): string { + const dot = name.lastIndexOf("."); + if (dot === -1) return ""; + return name.substring(dot + 1).replace(/:\d*-\d*/, ""); +} + +interface ContextFileItemProps { + file: ChatContextFile; + onOpenFile: (file: { file_path: string; line?: number }) => Promise<void>; +} + +const ContextFileItem: React.FC<ContextFileItemProps> = ({ + file, + onOpenFile, +}) => { + const [isOpen, setIsOpen] = useState(false); + const { shouldRender, isAnimatingOpen } = useDelayedUnmount(isOpen, 200); + const displayName = formatFileName(file.file_name, file.line1, file.line2); + const extension = getExtensionFromName(file.file_name); + + const handleFileClick = useCallback( + (e: React.MouseEvent) => { + e.stopPropagation(); + void onOpenFile({ file_path: file.file_name, line: file.line1 }); + }, + [onOpenFile, file.file_name, file.line1], + ); + + const handleToggle = useCallback(() => { + setIsOpen((prev) => !prev); + }, []); + + return ( + <div className={styles.item}> + <Flex + className={styles.header} + align="center" + gap="2" + onClick={handleToggle} + > + <FileTextIcon className={styles.icon} /> + <Text size="1" className={styles.filename} onClick={handleFileClick}> + {displayName} + </Text> + </Flex> + + {shouldRender && ( + <div + className={classNames( + styles.contentWrapper, + isAnimatingOpen && styles.contentWrapperOpen, + )} + > + <div 
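A couple of worked values (paths invented) for the filename helpers defined above, which drive the clickable file label and the Shiki language hint:

```ts
// Simplified mirror of formatFileName(): append ":line1-line2" when both are set.
const filename = (path: string): string => path.split("/").pop() || path;

const formatFileName = (filePath: string, line1?: number, line2?: number) => {
  const name = filename(filePath);
  return line1 && line2 ? `${name}:${line1}-${line2}` : name;
};

console.log(formatFileName("/home/user/project/src/main.rs", 10, 42));
// -> "main.rs:10-42"  (rendered as the clickable file label)

console.log("main.rs".substring("main.rs".lastIndexOf(".") + 1));
// -> "rs", which becomes the `language-rs` class passed to ShikiCodeBlock
```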
className={styles.contentInner}> + <Box className={styles.content}> + <ShikiCodeBlock + className={extension ? `language-${extension}` : undefined} + showLineNumbers={false} + > + {file.file_content} + </ShikiCodeBlock> + </Box> + </div> + </div> + )} + </div> + ); +}; + +interface ContextFileListProps { + files: ChatContextFile[]; +} + +export const ContextFileList: React.FC<ContextFileListProps> = ({ files }) => { + const { queryPathThenOpenFile } = useEventsBusForIDE(); + + if (files.length === 0) return null; + + return ( + <Flex direction="column" gap="1" className={styles.list}> + {files.map((file, index) => ( + <ContextFileItem + key={`${file.file_name}-${file.line1}-${file.line2}-${index}`} + file={file} + onOpenFile={queryPathThenOpenFile} + /> + ))} + </Flex> + ); +}; + +export default ContextFileList; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/EditTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/EditTool.module.css new file mode 100644 index 000000000..b5d0bce15 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/EditTool.module.css @@ -0,0 +1,149 @@ +.filename { + font-family: var(--code-font-family); + font-size: inherit; + color: var(--accent-11); + cursor: pointer; + vertical-align: baseline; +} + +.filename:hover { + text-decoration: underline; +} + +.statsInline { + font-family: var(--code-font-family); + font-size: 12px; + vertical-align: baseline; +} + +.statsInline .added { + color: var(--green-11); +} + +.statsInline .removed { + color: var(--red-11); + margin-left: 4px; +} + +.errorContent { + padding: var(--space-2); + background: var(--red-a2); + border-radius: var(--radius-2); + margin-bottom: var(--space-2); +} + +.actionBar { + padding-bottom: var(--space-2); +} + +.diffContent { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 400px; + overflow-y: auto; + overflow-x: auto; + background-color: var(--gray-a2); + border-radius: var(--radius-2); + font-family: var(--code-font-family); + font-size: 12px; + line-height: 1.5; +} + +.diffBlock { + padding: var(--space-1) 0; +} + +.diffLine { + min-width: min-content; + display: flex; +} + +.lineNumber { + width: 40px; + min-width: 40px; + text-align: right; + padding-right: var(--space-2); + color: var(--gray-9); + user-select: none; + flex-shrink: 0; +} + +.sign { + width: 20px; + min-width: 20px; + text-align: center; + flex-shrink: 0; + font-weight: 600; +} + +.lineContent { + white-space: pre; + flex: 1; + padding-right: var(--space-2); +} + +.remove { + background-color: rgba(255, 100, 100, 0.15); +} + +.remove .lineNumber { + background-color: rgba(255, 100, 100, 0.2); +} + +.remove .sign { + color: var(--red-11); + background-color: rgba(255, 100, 100, 0.2); +} + +.remove .lineContent { + color: var(--red-11); +} + +.add { + background-color: rgba(100, 255, 100, 0.1); +} + +.add .lineNumber { + background-color: rgba(100, 255, 100, 0.15); +} + +.add .sign { + color: var(--green-11); + background-color: rgba(100, 255, 100, 0.15); +} + +.add .lineContent { + color: var(--green-11); +} + +.fileList { + gap: var(--space-1); +} + +.fileItem { + border-radius: var(--radius-2); + background-color: var(--gray-a2); +} + +.fileHeader { + cursor: pointer; + padding: var(--space-2); + transition: filter 0.15s ease; +} + +.fileHeader:hover { + filter: brightness(1.2); +} + +.stats { + font-family: var(--code-font-family); + font-size: 12px; +} + +.stats .added { + color: var(--green-11); +} + +.stats .removed { + color: 
var(--red-11); + margin-left: 4px; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/EditTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/EditTool.tsx new file mode 100644 index 000000000..e026693ae --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/EditTool.tsx @@ -0,0 +1,390 @@ +import React, { useMemo, useState, useCallback } from "react"; +import { Pencil1Icon, PlusIcon } from "@radix-ui/react-icons"; +import { Flex, Text, Box, Spinner, Button } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { useAppSelector, useEventsBusForIDE } from "../../../hooks"; +import { + selectManyDiffMessageByIds, + selectIsStreaming, + selectIsWaiting, + selectToolResultById, +} from "../../../features/Chat/Thread/selectors"; +import { selectChatId, selectCanPaste } from "../../../features/Chat"; +import { ToolCall, DiffChunk } from "../../../services/refact/types"; +import { toolsApi } from "../../../services/refact"; +import { + parseRawTextDocToolCall, + isRawTextDocToolCall, + isCreateTextDocToolCall, + isUpdateTextDocToolCall, + isUpdateTextDocByLinesToolCall, +} from "../../Tools/types"; +import { basename } from "./utils"; +import styles from "./EditTool.module.css"; + +interface EditToolProps { + toolCall: ToolCall; + diffs?: DiffChunk[]; +} + +function getDiffStats(diffs: DiffChunk[]): { added: number; removed: number } { + let added = 0; + let removed = 0; + for (const diff of diffs) { + added += diff.lines_add.split("\n").filter((l) => l.length > 0).length; + removed += diff.lines_remove.split("\n").filter((l) => l.length > 0).length; + } + return { added, removed }; +} + +function getFilePath(toolCall: ToolCall): string | null { + try { + const args = JSON.parse(toolCall.function.arguments) as { path?: string }; + return args.path ?? null; + } catch { + return null; + } +} + +function isCreateTool(name: string | undefined): boolean { + return name === "create_textdoc"; +} + +const DiffLine: React.FC<{ + lineNumber?: number; + sign: string; + line: string; +}> = ({ lineNumber, sign, line }) => { + const isRemove = sign === "-"; + const isAdd = sign === "+"; + const rowClass = isRemove ? styles.remove : isAdd ? styles.add : ""; + return ( + <div className={`${styles.diffLine} ${rowClass}`}> + <span className={styles.lineNumber}>{lineNumber ?? 
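A small worked example (chunk contents invented) of the `+N −M` stats that `getDiffStats` above derives from DiffChunks for the card summary:

```ts
// Minimal shape of the fields getDiffStats() reads from a DiffChunk.
type ChunkLines = { lines_add: string; lines_remove: string };

const chunks: ChunkLines[] = [
  { lines_add: "let a = 1;\nlet b = 2;\n", lines_remove: "let a = 0;\n" },
  { lines_add: "", lines_remove: "console.log(a);\n" },
];

// Count non-empty lines per side, exactly as getDiffStats() does.
const added = chunks.reduce(
  (n, c) => n + c.lines_add.split("\n").filter((l) => l.length > 0).length,
  0,
);
const removed = chunks.reduce(
  (n, c) => n + c.lines_remove.split("\n").filter((l) => l.length > 0).length,
  0,
);

console.log({ added, removed }); // -> { added: 2, removed: 2 }, shown as "+2 −2"
```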
""}</span> + <span className={styles.sign}>{sign}</span> + <span className={styles.lineContent}>{line}</span> + </div> + ); +}; + +const DiffBlock: React.FC<{ diff: DiffChunk }> = ({ diff }) => { + const removeLines = diff.lines_remove.split("\n").filter((l) => l.length > 0); + const addLines = diff.lines_add.split("\n").filter((l) => l.length > 0); + + return ( + <Box className={styles.diffBlock}> + {removeLines.map((line, i) => ( + <DiffLine + key={`remove-${i}`} + lineNumber={diff.line1 + i} + sign="-" + line={line} + /> + ))} + {addLines.map((line, i) => ( + <DiffLine + key={`add-${i}`} + lineNumber={diff.line1 + i} + sign="+" + line={line} + /> + ))} + </Box> + ); +}; + +interface FileEditItemProps { + fileName: string; + diffs: DiffChunk[]; + onOpenFile: () => void; +} + +const FileEditItem: React.FC<FileEditItemProps> = ({ + fileName, + diffs, + onOpenFile, +}) => { + const [isOpen, setIsOpen] = useState(false); + const stats = useMemo(() => getDiffStats(diffs), [diffs]); + + const handleToggle = useCallback(() => { + setIsOpen((prev) => !prev); + }, []); + + const handleOpenClick = useCallback( + (e: React.MouseEvent) => { + e.stopPropagation(); + onOpenFile(); + }, + [onOpenFile], + ); + + return ( + <div className={styles.fileItem}> + <Flex + className={styles.fileHeader} + align="center" + gap="2" + onClick={handleToggle} + > + <Text size="1" className={styles.filename} onClick={handleOpenClick}> + {basename(fileName)} + </Text> + <Text size="1" className={styles.stats}> + {stats.added > 0 && ( + <span className={styles.added}>+{stats.added}</span> + )} + {stats.removed > 0 && ( + <span className={styles.removed}>−{stats.removed}</span> + )} + </Text> + </Flex> + + {isOpen && ( + <Box className={styles.diffContent}> + {diffs.map((diff, i) => ( + <DiffBlock key={i} diff={diff} /> + ))} + </Box> + )} + </div> + ); +}; + +export const EditTool: React.FC<EditToolProps> = ({ toolCall, diffs = [] }) => { + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + const { queryPathThenOpenFile, diffPasteBack, sendToolCallToIde } = + useEventsBusForIDE(); + const [requestDryRun, dryRunResult] = toolsApi.useDryRunForEditToolMutation(); + const isStreaming = useAppSelector(selectIsStreaming); + const isWaiting = useAppSelector(selectIsWaiting); + const canPaste = useAppSelector(selectCanPaste); + const chatId = useAppSelector(selectChatId); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const toolDiffs = useAppSelector( + selectManyDiffMessageByIds(toolCall.id ? [toolCall.id] : []), + ); + + const allDiffs = useMemo(() => { + const fromProps = diffs; + const fromStore = toolDiffs.flatMap((d) => d.content); + return fromProps.length > 0 ? 
fromProps : fromStore; + }, [diffs, toolDiffs]); + + const hasDiffs = allDiffs.length > 0; + const hasResult = maybeResult !== undefined; + + const parsedToolCall = useMemo(() => { + if (!isRawTextDocToolCall(toolCall)) return null; + return parseRawTextDocToolCall(toolCall); + }, [toolCall]); + + const replaceContent = useMemo(() => { + if (!parsedToolCall) return null; + if (isCreateTextDocToolCall(parsedToolCall)) { + return parsedToolCall.function.arguments.content; + } + if (isUpdateTextDocToolCall(parsedToolCall)) { + return parsedToolCall.function.arguments.replacement; + } + if (isUpdateTextDocByLinesToolCall(parsedToolCall)) { + return parsedToolCall.function.arguments.content; + } + return null; + }, [parsedToolCall]); + + const handleApplyDiff = useCallback(() => { + if (!parsedToolCall) return; + requestDryRun({ + toolName: parsedToolCall.function.name, + toolArgs: parsedToolCall.function.arguments, + }) + .then((results) => { + if (results.data) { + sendToolCallToIde(parsedToolCall, results.data, chatId); + } + }) + .catch(() => { + /* ignore */ + }); + }, [chatId, parsedToolCall, requestDryRun, sendToolCallToIde]); + + const handleReplace = useCallback(() => { + if (replaceContent) { + diffPasteBack(replaceContent, chatId, toolCall.id); + } + }, [chatId, diffPasteBack, replaceContent, toolCall.id]); + + const filePath = useMemo(() => { + const fromArgs = getFilePath(toolCall); + if (fromArgs) return fromArgs; + if (allDiffs.length > 0) return allDiffs[0].file_name; + return null; + }, [toolCall, allDiffs]); + const isCreate = isCreateTool(toolCall.function.name); + const stats = useMemo(() => getDiffStats(allDiffs), [allDiffs]); + + const filesByName = useMemo(() => { + const grouped: Record<string, DiffChunk[]> = {}; + for (const diff of allDiffs) { + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition + grouped[diff.file_name] = (grouped[diff.file_name] || []).concat(diff); + } + return grouped; + }, [allDiffs]); + + const fileNames = Object.keys(filesByName); + const isSingleFile = fileNames.length <= 1; + + const handleFileClick = useCallback( + (e: React.MouseEvent, path: string) => { + e.stopPropagation(); + void queryPathThenOpenFile({ file_path: path }); + }, + [queryPathThenOpenFile], + ); + + const status: ToolStatus = useMemo(() => { + // Check if tool failed (returned error result instead of diff) + if ( + maybeResult && + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + // Still running if no diffs AND no result AND streaming/waiting + if (!hasDiffs && !hasResult && (isStreaming || isWaiting)) return "running"; + // Has result but no diffs - could be an error message + if (hasResult && !hasDiffs) { + return "error"; + } + return "success"; + }, [hasDiffs, hasResult, isStreaming, isWaiting, maybeResult]); + + const summary = useMemo(() => { + const statsEl = + stats.added > 0 || stats.removed > 0 ? ( + <span className={styles.statsInline}> + {stats.added > 0 && ( + <span className={styles.added}>+{stats.added}</span> + )} + {stats.removed > 0 && ( + <span className={styles.removed}>−{stats.removed}</span> + )} + </span> + ) : null; + + const verb = isCreate ? 
"Create" : "Edit"; + if (isSingleFile && filePath) { + return ( + <> + {verb}{" "} + <span + className={styles.filename} + onClick={(e) => handleFileClick(e, filePath)} + > + {basename(filePath)} + </span> + {statsEl && <> {statsEl}</>} + </> + ); + } + if (fileNames.length > 1) { + return ( + <> + {verb} {fileNames.length} files {statsEl} + </> + ); + } + return ( + <> + {verb} file {statsEl} + </> + ); + }, [ + isCreate, + isSingleFile, + filePath, + fileNames.length, + handleFileClick, + stats.added, + stats.removed, + ]); + + const icon = isCreate ? <PlusIcon /> : <Pencil1Icon />; + + return ( + <ToolCard + icon={icon} + summary={summary} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {status === "error" && + maybeResult?.content && + typeof maybeResult.content === "string" && ( + <Box className={styles.errorContent}> + <Text size="1" color="red"> + {maybeResult.content} + </Text> + </Box> + )} + {hasDiffs && ( + <> + <Flex gap="2" className={styles.actionBar}> + <Button + size="1" + variant="soft" + onClick={handleApplyDiff} + disabled={dryRunResult.isLoading || !parsedToolCall} + > + {dryRunResult.isLoading ? <Spinner size="1" /> : "➕ Diff"} + </Button> + {replaceContent && ( + <Button + size="1" + variant="soft" + onClick={handleReplace} + disabled={!canPaste} + > + ➕ Replace + </Button> + )} + </Flex> + {isSingleFile ? ( + <Box className={styles.diffContent}> + {allDiffs.map((diff, i) => ( + <DiffBlock key={i} diff={diff} /> + ))} + </Box> + ) : ( + <Flex direction="column" gap="1" className={styles.fileList}> + {fileNames.map((fileName) => ( + <FileEditItem + key={fileName} + fileName={fileName} + diffs={filesByName[fileName]} + onOpenFile={() => + void queryPathThenOpenFile({ file_path: fileName }) + } + /> + ))} + </Flex> + )} + </> + )} + </ToolCard> + ); +}; + +export default EditTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/FileOpTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/FileOpTool.module.css new file mode 100644 index 000000000..53a85ccc9 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/FileOpTool.module.css @@ -0,0 +1,25 @@ +.filename { + font-family: var(--code-font-family); + font-size: inherit; + color: var(--accent-11); + cursor: pointer; +} + +.filename:hover { + text-decoration: underline; +} + +.meta { + color: var(--gray-11); +} + +.removed { + color: var(--red-11); + font-family: var(--code-font-family); +} + +.resultContent { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/FileOpTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/FileOpTool.tsx new file mode 100644 index 000000000..c072c994d --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/FileOpTool.tsx @@ -0,0 +1,186 @@ +import React, { useMemo, useCallback } from "react"; +import { MoveIcon, TrashIcon, PlusCircledIcon } from "@radix-ui/react-icons"; +import { Box } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { useAppSelector, useEventsBusForIDE } from "../../../hooks"; +import { + selectToolResultById, + selectManyDiffMessageByIds, +} from "../../../features/Chat/Thread/selectors"; +import { ToolCall, DiffChunk } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import { basename } from "./utils"; 
+import styles from "./FileOpTool.module.css"; + +type FileOpType = "mv" | "rm" | "add_workspace_folder"; + +interface MvArgs { + source?: string; + destination?: string; +} + +interface RmArgs { + path?: string; + recursive?: boolean; +} + +interface AddWorkspaceArgs { + path?: string; +} + +interface FileOpToolProps { + toolCall: ToolCall; + toolType: FileOpType; + diffs?: DiffChunk[]; +} + +export const FileOpTool: React.FC<FileOpToolProps> = ({ + toolCall, + toolType, + diffs = [], +}) => { + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + const { queryPathThenOpenFile } = useEventsBusForIDE(); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const toolDiffs = useAppSelector( + selectManyDiffMessageByIds(toolCall.id ? [toolCall.id] : []), + ); + + const allDiffs = useMemo((): DiffChunk[] => { + const fromProps = diffs; + const fromStore = toolDiffs.flatMap((d) => d.content); + return fromProps.length > 0 ? fromProps : fromStore; + }, [diffs, toolDiffs]); + + const args = useMemo((): MvArgs | RmArgs | AddWorkspaceArgs => { + try { + return JSON.parse(toolCall.function.arguments) as + | MvArgs + | RmArgs + | AddWorkspaceArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const status: ToolStatus = useMemo(() => { + if (maybeResult) { + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + } + // rm tool returns diff message (not tool message) when deleting files with content + if (toolDiffs.length > 0) { + return "success"; + } + return "running"; + }, [maybeResult, toolDiffs]); + + const handleFileClick = useCallback( + (e: React.MouseEvent, filePath: string) => { + e.stopPropagation(); + void queryPathThenOpenFile({ file_path: filePath }); + }, + [queryPathThenOpenFile], + ); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? maybeResult.content + : null; + + const { icon, summary } = useMemo(() => { + if (toolType === "mv") { + const mvArgs = args as MvArgs; + const src = mvArgs.source ?? ""; + const dest = mvArgs.destination ?? ""; + return { + icon: <MoveIcon />, + summary: ( + <> + Move{" "} + <span + className={styles.filename} + onClick={(e) => handleFileClick(e, src)} + > + {basename(src)} + </span> + {" → "} + <span + className={styles.filename} + onClick={(e) => handleFileClick(e, dest)} + > + {basename(dest)} + </span> + </> + ), + }; + } + + if (toolType === "add_workspace_folder") { + const addArgs = args as AddWorkspaceArgs; + const path = addArgs.path ?? ""; + return { + icon: <PlusCircledIcon />, + summary: ( + <> + Add workspace{" "} + <span className={styles.filename}>{basename(path)}</span> + </> + ), + }; + } + + // rm + const rmArgs = args as RmArgs; + const path = rmArgs.path ?? 
""; + const isDir = rmArgs.recursive; + const linesRemoved = allDiffs.reduce((acc, d) => { + return ( + acc + d.lines_remove.split("\n").filter((l) => l.length > 0).length + ); + }, 0); + return { + icon: <TrashIcon />, + summary: ( + <> + Delete <span className={styles.filename}>{basename(path)}</span> + {isDir && <span className={styles.meta}> (recursive)</span>} + {linesRemoved > 0 && ( + <span className={styles.removed}> −{linesRemoved}</span> + )} + </> + ), + }; + }, [toolType, args, handleFileClick, allDiffs]); + + return ( + <ToolCard + icon={icon} + summary={summary} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {content && ( + <Box className={styles.resultContent}> + <ShikiCodeBlock showLineNumbers={false}>{content}</ShikiCodeBlock> + </Box> + )} + </ToolCard> + ); +}; + +export default FileOpTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/GenericTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/GenericTool.module.css new file mode 100644 index 000000000..0f8ea00b3 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/GenericTool.module.css @@ -0,0 +1,16 @@ +.args { + color: var(--gray-11); + font-family: var(--code-font-family); + font-size: inherit; +} + +.resultContent { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; +} + +.markdownContent { + padding: var(--space-2); + font-size: var(--font-size-1); +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/GenericTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/GenericTool.tsx new file mode 100644 index 000000000..8e1b47ac6 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/GenericTool.tsx @@ -0,0 +1,123 @@ +import React, { useMemo } from "react"; +import { GearIcon } from "@radix-ui/react-icons"; +import { Box } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { useAppSelector } from "../../../hooks"; +import { + selectToolResultById, + selectIsStreaming, + selectIsWaiting, +} from "../../../features/Chat/Thread/selectors"; +import { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import { Markdown } from "../../Markdown"; +import styles from "./GenericTool.module.css"; + +interface GenericToolProps { + toolCall: ToolCall; +} + +function formatToolName(name: string): string { + return name.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase()); +} + +function formatArgs(argsStr: string): string { + try { + const args = JSON.parse(argsStr) as Record<string, unknown>; + const entries = Object.entries(args); + if (entries.length === 0) return ""; + return entries + .map(([k, v]) => { + const valueStr = typeof v === "string" ? 
v : JSON.stringify(v); + return `${k}=${valueStr}`; + }) + .join(", "); + } catch { + return argsStr; + } +} + +function looksLikeMarkdown(text: string): boolean { + if (text.includes("```")) return true; + if (/\[[^\]]+\]\([^)]+\)/.test(text)) return true; + if (/^#{1,6}\s+\S/m.test(text)) return true; + if (/^\s*([-*+])\s+\S/m.test(text)) return true; + if (/^\s*\d+\.\s+\S/m.test(text)) return true; + const hasTableHeader = /^\s*\|.+\|\s*$/m.test(text); + const hasTableSep = /^\s*\|[\s:|-]+\|\s*$/m.test(text); + if (hasTableHeader && hasTableSep) return true; + return false; +} + +export const GenericTool: React.FC<GenericToolProps> = ({ toolCall }) => { + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + const isStreaming = useAppSelector(selectIsStreaming); + const isWaiting = useAppSelector(selectIsWaiting); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult && (isStreaming || isWaiting)) return "running"; + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult, isStreaming, isWaiting]); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? maybeResult.content + : null; + + const toolName = toolCall.function.name ?? "tool"; + const argsPreview = formatArgs(toolCall.function.arguments); + + const summary = useMemo(() => { + const displayName = formatToolName(toolName); + if (argsPreview) { + return ( + <> + {displayName} <span className={styles.args}>{argsPreview}</span> + </> + ); + } + return displayName; + }, [toolName, argsPreview]); + + const shouldRenderMarkdown = + content && content.length <= 50000 && looksLikeMarkdown(content); + + return ( + <ToolCard + icon={<GearIcon />} + summary={summary} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {content && ( + <Box className={styles.resultContent}> + {shouldRenderMarkdown ? 
( + <Box className={styles.markdownContent}> + <Markdown>{content}</Markdown> + </Box> + ) : ( + <ShikiCodeBlock showLineNumbers={false}>{content}</ShikiCodeBlock> + )} + </Box> + )} + </ToolCard> + ); +}; + +export default GenericTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/KnowledgeTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/KnowledgeTool.module.css new file mode 100644 index 000000000..87742b5d0 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/KnowledgeTool.module.css @@ -0,0 +1,9 @@ +.query { + color: var(--accent-11); +} + +.resultContent { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/KnowledgeTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/KnowledgeTool.tsx new file mode 100644 index 000000000..12c1d2ede --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/KnowledgeTool.tsx @@ -0,0 +1,141 @@ +import React, { useMemo } from "react"; +import { ReaderIcon, ChatBubbleIcon, Pencil2Icon } from "@radix-ui/react-icons"; +import { Box } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { ContextFileList } from "./ContextFileList"; +import { useAppSelector } from "../../../hooks"; +import { selectToolResultById } from "../../../features/Chat/Thread/selectors"; +import { ChatContextFile, ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./KnowledgeTool.module.css"; + +type KnowledgeToolType = + | "knowledge" + | "create_knowledge" + | "trajectories" + | "search_trajectories"; + +interface KnowledgeArgs { + search_key?: string; +} + +interface CreateKnowledgeArgs { + content?: string; +} + +interface TrajectoriesArgs { + query?: string; +} + +interface KnowledgeToolProps { + toolCall: ToolCall; + toolType: KnowledgeToolType; + contextFiles?: ChatContextFile[]; +} + +export const KnowledgeTool: React.FC<KnowledgeToolProps> = ({ + toolCall, + toolType, + contextFiles, +}) => { + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const args = useMemo((): + | KnowledgeArgs + | CreateKnowledgeArgs + | TrajectoriesArgs => { + try { + return JSON.parse(toolCall.function.arguments) as + | KnowledgeArgs + | CreateKnowledgeArgs + | TrajectoriesArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult]); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? maybeResult.content + : null; + + const summary = useMemo(() => { + if (toolType === "knowledge") { + const knowledgeArgs = args as KnowledgeArgs; + const query = knowledgeArgs.search_key ?? "knowledge"; + return ( + <> + Recall <span className={styles.query}>&quot;{query}&quot;</span> + </> + ); + } + + if (toolType === "create_knowledge") { + const createArgs = args as CreateKnowledgeArgs; + const preview = createArgs.content ?? 
"memory"; + return ( + <> + Remember <span className={styles.query}>&quot;{preview}&quot;</span> + </> + ); + } + + // toolType === "trajectories" || toolType === "search_trajectories" + const trajArgs = args as TrajectoriesArgs; + const query = trajArgs.query ?? "conversations"; + return ( + <> + Recall <span className={styles.query}>&quot;{query}&quot;</span> + </> + ); + }, [toolType, args]); + + const icon = + toolType === "create_knowledge" ? ( + <Pencil2Icon /> + ) : toolType === "knowledge" ? ( + <ReaderIcon /> + ) : ( + <ChatBubbleIcon /> + ); + + return ( + <ToolCard + icon={icon} + summary={summary} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {content && ( + <Box className={styles.resultContent}> + <ShikiCodeBlock showLineNumbers={false}>{content}</ShikiCodeBlock> + </Box> + )} + {contextFiles && contextFiles.length > 0 && ( + <ContextFileList files={contextFiles} /> + )} + </ToolCard> + ); +}; + +export default KnowledgeTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ListTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/ListTool.module.css new file mode 100644 index 000000000..61a583f7f --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ListTool.module.css @@ -0,0 +1,16 @@ +.path { + font-family: var(--code-font-family); + font-size: 0.95em; + color: var(--accent-11); + cursor: pointer; +} + +.path:hover { + text-decoration: underline; +} + +.resultContent { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ListTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/ListTool.tsx new file mode 100644 index 000000000..f56ddc0cb --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ListTool.tsx @@ -0,0 +1,112 @@ +import React, { useMemo, useCallback } from "react"; +import { ArchiveIcon } from "@radix-ui/react-icons"; +import { Box } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { ContextFileList } from "./ContextFileList"; +import { useAppSelector, useEventsBusForIDE } from "../../../hooks"; +import { selectToolResultById } from "../../../features/Chat/Thread/selectors"; +import { ChatContextFile, ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./ListTool.module.css"; + +interface ListToolArgs { + path?: string; + use_ast?: boolean; + max_files?: number; +} + +interface ListToolProps { + toolCall: ToolCall; + contextFiles?: ChatContextFile[]; +} + +export const ListTool: React.FC<ListToolProps> = ({ + toolCall, + contextFiles, +}) => { + const storeKey = toolCall.id ? 
`tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + const { queryPathThenOpenFile } = useEventsBusForIDE(); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const args = useMemo<ListToolArgs>(() => { + try { + return JSON.parse(toolCall.function.arguments) as ListToolArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult]); + + const handlePathClick = useCallback( + (e: React.MouseEvent) => { + e.stopPropagation(); + if (args.path) { + void queryPathThenOpenFile({ file_path: args.path }); + } + }, + [queryPathThenOpenFile, args.path], + ); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? maybeResult.content + : null; + + const summary = useMemo(() => { + const path = args.path ?? "project"; + return ( + <> + List{" "} + <span className={styles.path} onClick={handlePathClick}> + {path} + </span> + </> + ); + }, [args.path, handlePathClick]); + + const meta = useMemo(() => { + const parts: string[] = []; + if (args.use_ast) parts.push("AST"); + if (args.max_files) parts.push(`max ${args.max_files}`); + return parts.length > 0 ? parts.join(" · ") : null; + }, [args.use_ast, args.max_files]); + + return ( + <ToolCard + icon={<ArchiveIcon />} + summary={summary} + meta={meta} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {content && ( + <Box className={styles.resultContent}> + <ShikiCodeBlock showLineNumbers={false}>{content}</ShikiCodeBlock> + </Box> + )} + {contextFiles && contextFiles.length > 0 && ( + <ContextFileList files={contextFiles} /> + )} + </ToolCard> + ); +}; + +export default ListTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIAudioTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIAudioTool.tsx new file mode 100644 index 000000000..fb5f44750 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIAudioTool.tsx @@ -0,0 +1,59 @@ +import React, { useMemo } from "react"; +import { SpeakerLoudIcon } from "@radix-ui/react-icons"; +import { Box, Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./OpenAIResponsesTool.module.css"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +export const OpenAIAudioTool: React.FC<Props> = ({ toolCall }) => { + const state = useOpenAiResponsesToolCardState(toolCall); + const args = state.parsedArgs as Record<string, unknown> | null; + + const transcript = useMemo(() => { + if (!args) return null; + if (typeof args.transcript === "string") return args.transcript; + if (typeof args.text === "string") return args.text; + return null; + }, [args]); + + const summary = transcript ? ( + <> + Audio:{" "} + <span className={styles.inlineCode}> + {transcript.slice(0, 40)} + {transcript.length > 40 ? 
"…" : ""} + </span> + </> + ) : ( + "Audio" + ); + + return ( + <ToolCard + icon={<SpeakerLoudIcon />} + summary={summary} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + {transcript && ( + <Box className={styles.codeBox}> + <ShikiCodeBlock showLineNumbers={false}>{transcript}</ShikiCodeBlock> + </Box> + )} + + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAICodeInterpreterCallTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAICodeInterpreterCallTool.tsx new file mode 100644 index 000000000..5929f19b0 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAICodeInterpreterCallTool.tsx @@ -0,0 +1,89 @@ +import React, { useMemo } from "react"; +import { LapTimerIcon } from "@radix-ui/react-icons"; +import { Box, Flex, Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import { DialogImage } from "../../DialogImage"; +import styles from "./OpenAIResponsesTool.module.css"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +type CodeInterpreterOutput = { + type?: string; + text?: string; + image_url?: string; + image?: { url?: string }; +}; + +export const OpenAICodeInterpreterCallTool: React.FC<Props> = ({ + toolCall, +}) => { + const state = useOpenAiResponsesToolCardState(toolCall); + const args = state.parsedArgs as Record<string, unknown> | null; + + const outputs = useMemo(() => { + if (!args) return [] as CodeInterpreterOutput[]; + if (!Array.isArray(args.outputs)) return [] as CodeInterpreterOutput[]; + return (args.outputs as unknown[]) + .map((o) => + typeof o === "object" && o ? (o as CodeInterpreterOutput) : {}, + ) + .slice(0, 200); + }, [args]); + + const textOutputs = outputs + .map((o) => (typeof o.text === "string" ? 
o.text : null)) + .filter((t): t is string => !!t); + + const imageUrls: string[] = outputs + .map((o) => { + if (typeof o.image_url === "string") return o.image_url; + if (o.image && typeof o.image.url === "string") return o.image.url; + return null; + }) + .filter((u): u is string => !!u); + + const summary = ( + <> + Code Interpreter{" "} + <span className={styles.inlineCode}>{outputs.length} outputs</span> + </> + ); + + return ( + <ToolCard + icon={<LapTimerIcon />} + summary={summary} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + {textOutputs.length > 0 && ( + <Box className={styles.codeBox}> + <ShikiCodeBlock showLineNumbers={false}> + {textOutputs.join("\n\n")} + </ShikiCodeBlock> + </Box> + )} + + {imageUrls.length > 0 && ( + <Flex py="2" gap="2" wrap="wrap"> + {imageUrls.map((url, idx) => ( + <DialogImage key={idx} src={url} fallback="" size="8" /> + ))} + </Flex> + )} + + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIComputerCallOutputTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIComputerCallOutputTool.tsx new file mode 100644 index 000000000..7ec5f6146 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIComputerCallOutputTool.tsx @@ -0,0 +1,57 @@ +import React, { useMemo } from "react"; +import { DesktopIcon, ImageIcon } from "@radix-ui/react-icons"; +import { Flex, Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import { DialogImage } from "../../DialogImage"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +export const OpenAIComputerCallOutputTool: React.FC<Props> = ({ toolCall }) => { + const state = useOpenAiResponsesToolCardState(toolCall); + const args = state.parsedArgs as Record<string, unknown> | null; + + const imageUrls = useMemo(() => { + if (!args) return [] as string[]; + + // Typical shape: { output: { image_url: "..." } } + const output = + typeof args.output === "object" && args.output !== null + ? (args.output as Record<string, unknown>) + : null; + const url = + output && typeof output.image_url === "string" ? output.image_url : null; + + if (url) return [url]; + return []; + }, [args]); + + return ( + <ToolCard + icon={imageUrls.length > 0 ? 
<ImageIcon /> : <DesktopIcon />} + summary={"Computer Output"} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + {imageUrls.length > 0 && ( + <Flex py="2" gap="2" wrap="wrap"> + {imageUrls.map((url, idx) => ( + <DialogImage key={idx} src={url} fallback="" size="8" /> + ))} + </Flex> + )} + + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIComputerCallTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIComputerCallTool.tsx new file mode 100644 index 000000000..b074c4ea3 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIComputerCallTool.tsx @@ -0,0 +1,46 @@ +import React, { useMemo } from "react"; +import { DesktopIcon } from "@radix-ui/react-icons"; +import { Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./OpenAIResponsesTool.module.css"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +export const OpenAIComputerCallTool: React.FC<Props> = ({ toolCall }) => { + const state = useOpenAiResponsesToolCardState(toolCall); + const args = state.parsedArgs as Record<string, unknown> | null; + + const summary = useMemo(() => { + const action = + args && typeof args.action === "string" ? args.action : undefined; + return action ? ( + <> + Computer Call: <span className={styles.inlineCode}>{action}</span> + </> + ) : ( + state.label + ); + }, [args, state.label]); + + return ( + <ToolCard + icon={<DesktopIcon />} + summary={summary} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIFileSearchCallTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIFileSearchCallTool.tsx new file mode 100644 index 000000000..d06443193 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIFileSearchCallTool.tsx @@ -0,0 +1,87 @@ +import React, { useMemo } from "react"; +import { FileTextIcon } from "@radix-ui/react-icons"; +import { Box, Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./OpenAIResponsesTool.module.css"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +type FileSearchResult = { + filename?: string; + file_name?: string; + text?: string; + content?: string; +}; + +export const OpenAIFileSearchCallTool: React.FC<Props> = ({ toolCall }) => { + const state = useOpenAiResponsesToolCardState(toolCall); + + const args = state.parsedArgs as Record<string, unknown> | null; + const query = args && typeof args.query === "string" ? 
args.query : undefined; + + const results = useMemo(() => { + if (!args) return [] as FileSearchResult[]; + if (!Array.isArray(args.results)) return [] as FileSearchResult[]; + return (args.results as unknown[]) + .map((r) => (typeof r === "object" && r ? (r as FileSearchResult) : {})) + .slice(0, 200); + }, [args]); + + const summary = query ? ( + <> + File Search: <span className={styles.inlineCode}>{query}</span> + </> + ) : ( + state.label + ); + + return ( + <ToolCard + icon={<FileTextIcon />} + summary={summary} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + {results.length > 0 && ( + <Box> + <Text size="1" color="gray"> + Matches ({results.length}) + </Text> + <Box className={styles.resultList}> + {results.map((r, idx) => { + const filename = r.filename ?? r.file_name ?? "(file)"; + const text = r.text ?? r.content ?? ""; + return ( + <Box key={idx} className={styles.resultItem}> + <Text size="2" weight="medium" className={styles.inlineCode}> + {filename} + </Text> + {text && ( + <Box mt="1" className={styles.codeBox}> + <ShikiCodeBlock showLineNumbers={false}> + {text} + </ShikiCodeBlock> + </Box> + )} + </Box> + ); + })} + </Box> + </Box> + )} + + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIImageGenerationCallTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIImageGenerationCallTool.tsx new file mode 100644 index 000000000..854b7e835 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIImageGenerationCallTool.tsx @@ -0,0 +1,61 @@ +import React, { useMemo } from "react"; +import { ImageIcon } from "@radix-ui/react-icons"; +import { Flex, Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import { DialogImage } from "../../DialogImage"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +export const OpenAIImageGenerationCallTool: React.FC<Props> = ({ + toolCall, +}) => { + const state = useOpenAiResponsesToolCardState(toolCall); + const args = state.parsedArgs as Record<string, unknown> | null; + + const imageUrls = useMemo(() => { + if (!args) return [] as string[]; + + // Sometimes: { image_url: "..." } or { output: { image_url: "..." } } + const direct = typeof args.image_url === "string" ? args.image_url : null; + const output = + typeof args.output === "object" && args.output !== null + ? (args.output as Record<string, unknown>) + : null; + const nested = + output && typeof output.image_url === "string" ? 
output.image_url : null; + + return [direct, nested].filter( + (u): u is string => typeof u === "string" && u.length > 0, + ); + }, [args]); + + return ( + <ToolCard + icon={<ImageIcon />} + summary={"Image Generation"} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + {imageUrls.length > 0 && ( + <Flex py="2" gap="2" wrap="wrap"> + {imageUrls.map((url, idx) => ( + <DialogImage key={idx} src={url} fallback="" size="8" /> + ))} + </Flex> + )} + + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIMcpCallTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIMcpCallTool.tsx new file mode 100644 index 000000000..7a4925d06 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIMcpCallTool.tsx @@ -0,0 +1,48 @@ +import React, { useMemo } from "react"; +import { CubeIcon } from "@radix-ui/react-icons"; +import { Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./OpenAIResponsesTool.module.css"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +export const OpenAIMcpCallTool: React.FC<Props> = ({ toolCall }) => { + const state = useOpenAiResponsesToolCardState(toolCall); + const args = state.parsedArgs as Record<string, unknown> | null; + + const summary = useMemo(() => { + if (!args) return "MCP Call"; + const server = typeof args.server === "string" ? args.server : undefined; + const tool = typeof args.tool === "string" ? args.tool : undefined; + const label = [server, tool].filter(Boolean).join(" "); + return label ? 
( + <> + MCP Call: <span className={styles.inlineCode}>{label}</span> + </> + ) : ( + "MCP Call" + ); + }, [args]); + + return ( + <ToolCard + icon={<CubeIcon />} + summary={summary} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIMcpListToolsTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIMcpListToolsTool.tsx new file mode 100644 index 000000000..55045a4ad --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIMcpListToolsTool.tsx @@ -0,0 +1,32 @@ +import React from "react"; +import { CubeIcon } from "@radix-ui/react-icons"; +import { Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +export const OpenAIMcpListToolsTool: React.FC<Props> = ({ toolCall }) => { + const state = useOpenAiResponsesToolCardState(toolCall); + + return ( + <ToolCard + icon={<CubeIcon />} + summary={"MCP List Tools"} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIRefusalTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIRefusalTool.tsx new file mode 100644 index 000000000..18c4823e5 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIRefusalTool.tsx @@ -0,0 +1,47 @@ +import React, { useMemo } from "react"; +import { ExclamationTriangleIcon } from "@radix-ui/react-icons"; +import { Box, Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./OpenAIResponsesTool.module.css"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +export const OpenAIRefusalTool: React.FC<Props> = ({ toolCall }) => { + const state = useOpenAiResponsesToolCardState(toolCall); + const args = state.parsedArgs as Record<string, unknown> | null; + + const refusal = useMemo(() => { + if (!args) return null; + if (typeof args.refusal === "string") return args.refusal; + if (typeof args.text === "string") return args.text; + return null; + }, [args]); + + return ( + <ToolCard + icon={<ExclamationTriangleIcon />} + summary={"Refusal"} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + {refusal && ( + <Box className={styles.codeBox}> + <ShikiCodeBlock showLineNumbers={false}>{refusal}</ShikiCodeBlock> + </Box> + )} + + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIResponsesTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIResponsesTool.module.css new file mode 100644 
index 000000000..c086b7b10 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIResponsesTool.module.css @@ -0,0 +1,31 @@ +.content { + margin-top: 8px; +} + +.inlineCode { + font-family: var(--code-font-family); + color: var(--gray-12); +} + +.resultList { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; + margin-top: var(--space-1); + border-radius: var(--radius-2); + border: 1px solid var(--gray-a4); +} + +.resultItem { + padding: var(--space-2); + border-bottom: 1px solid var(--gray-a4); +} + +.resultItem:last-child { + border-bottom: none; +} + +.codeBox { + border-radius: var(--radius-2); + overflow: hidden; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIResponsesTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIResponsesTool.tsx new file mode 100644 index 000000000..8476bc053 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIResponsesTool.tsx @@ -0,0 +1,325 @@ +import React, { useMemo } from "react"; +import { + CubeIcon, + FileTextIcon, + MagnifyingGlassIcon, + LapTimerIcon, + DesktopIcon, + SpeakerLoudIcon, + ImageIcon, + ExclamationTriangleIcon, +} from "@radix-ui/react-icons"; +import { Box, Flex, Text } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { useAppSelector } from "../../../hooks"; +import { + selectIsStreaming, + selectIsWaiting, + selectToolResultById, +} from "../../../features/Chat/Thread/selectors"; +import { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import { Markdown } from "../../Markdown"; +import styles from "./OpenAIResponsesTool.module.css"; +import { toolNameLabel } from "./openaiResponsesToolCardState"; + +type OpenAIResponsesToolProps = { + toolCall: ToolCall; +}; + +function parseJsonOrNull(text: string): unknown { + try { + return JSON.parse(text) as unknown; + } catch { + return null; + } +} + +function getToolIcon(toolName: string): React.ReactNode { + switch (toolName) { + case "openai_web_search_call": + return <MagnifyingGlassIcon />; + case "openai_file_search_call": + return <FileTextIcon />; + case "openai_code_interpreter_call": + return <LapTimerIcon />; + case "openai_computer_call": + case "openai_computer_call_output": + return <DesktopIcon />; + case "openai_audio": + return <SpeakerLoudIcon />; + case "openai_image_generation_call": + return <ImageIcon />; + case "openai_refusal": + return <ExclamationTriangleIcon />; + default: + return <CubeIcon />; + } +} + +function extractToolSummary(toolName: string, args: unknown): React.ReactNode { + const label = toolNameLabel(toolName); + if (!args || typeof args !== "object") { + return label; + } + + // Most server tools pass the full output item JSON as arguments. + const obj = args as Record<string, unknown>; + const t = typeof obj.type === "string" ? obj.type : undefined; + + if (toolName === "openai_web_search_call") { + const q = typeof obj.query === "string" ? obj.query : undefined; + return q ? ( + <> + Web Search: <span className={styles.inlineCode}>{q}</span> + </> + ) : ( + label + ); + } + + if (toolName === "openai_file_search_call") { + const q = typeof obj.query === "string" ? obj.query : undefined; + return q ? 
( + <> + File Search: <span className={styles.inlineCode}>{q}</span> + </> + ) : ( + label + ); + } + + if (toolName === "openai_code_interpreter_call") { + return <>{t ? `Code Interpreter (${t})` : "Code Interpreter"}</>; + } + + if (toolName === "openai_computer_call") { + return <>{t ? `Computer Call (${t})` : "Computer Call"}</>; + } + + if (toolName === "openai_computer_call_output") { + return <>{t ? `Computer Output (${t})` : "Computer Output"}</>; + } + + if (toolName === "openai_audio") { + return <>{t ? `Audio (${t})` : "Audio"}</>; + } + + if (toolName === "openai_image_generation_call") { + return <>{t ? `Image (${t})` : "Image Generation"}</>; + } + + if (toolName === "openai_refusal") { + return "Refusal"; + } + + return label; +} + +export const OpenAIResponsesTool: React.FC<OpenAIResponsesToolProps> = ({ + toolCall, +}) => { + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + const isStreaming = useAppSelector(selectIsStreaming); + const isWaiting = useAppSelector(selectIsWaiting); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult && (isStreaming || isWaiting)) return "running"; + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult, isStreaming, isWaiting]); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? maybeResult.content + : null; + + const toolName = toolCall.function.name ?? "openai"; + + const parsedArgs = useMemo( + () => parseJsonOrNull(toolCall.function.arguments), + [toolCall.function.arguments], + ); + + const summary = useMemo( + () => extractToolSummary(toolName, parsedArgs), + [toolName, parsedArgs], + ); + + const rawJson = useMemo(() => { + if (parsedArgs == null) return toolCall.function.arguments; + try { + return JSON.stringify(parsedArgs, null, 2); + } catch { + return toolCall.function.arguments; + } + }, [parsedArgs, toolCall.function.arguments]); + + return ( + <ToolCard + icon={getToolIcon(toolName)} + summary={summary} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + <Box className={styles.content}> + {content ? <Markdown>{content}</Markdown> : null} + + {parsedArgs != null && typeof parsedArgs === "object" && ( + <Box mb="2">{renderOpenAiResponsesPayload(toolName, parsedArgs)}</Box> + )} + + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{rawJson}</ShikiCodeBlock> + </Box> + </ToolCard> + ); +}; + +function renderOpenAiResponsesPayload( + toolName: string, + args: unknown, +): React.ReactNode { + const obj = args as Record<string, unknown>; + + // Web search: show results list (if present) + if (toolName === "openai_web_search_call") { + const results = Array.isArray(obj.results) + ? (obj.results as unknown[]) + : []; + if (results.length > 0) { + return ( + <Box> + <Text size="1" color="gray"> + Results ({results.length}) + </Text> + <Box className={styles.resultList}> + {results.slice(0, 20).map((r, idx) => { + const rr = r as Record<string, unknown>; + const title = + typeof rr.title === "string" ? rr.title : "(no title)"; + const url = typeof rr.url === "string" ? rr.url : ""; + const snippet = + typeof rr.snippet === "string" + ? rr.snippet + : typeof rr.description === "string" + ? 
rr.description + : ""; + return ( + <Box key={idx} className={styles.resultItem}> + <Flex direction="column" gap="1"> + <Text size="2" weight="medium"> + {title} + </Text> + {url && ( + <Text size="1" color="gray" className={styles.inlineCode}> + {url} + </Text> + )} + {snippet && ( + <Text size="1" color="gray"> + {snippet} + </Text> + )} + </Flex> + </Box> + ); + })} + </Box> + </Box> + ); + } + } + + // File search: show matches (if present) + if (toolName === "openai_file_search_call") { + const results = Array.isArray(obj.results) + ? (obj.results as unknown[]) + : []; + if (results.length > 0) { + return ( + <Box> + <Text size="1" color="gray"> + Matches ({results.length}) + </Text> + <Box className={styles.resultList}> + {results.slice(0, 50).map((r, idx) => { + const rr = r as Record<string, unknown>; + const filename = + typeof rr.filename === "string" + ? rr.filename + : typeof rr.file_name === "string" + ? rr.file_name + : "(file)"; + const text = + typeof rr.text === "string" + ? rr.text + : typeof rr.content === "string" + ? rr.content + : ""; + return ( + <Box key={idx} className={styles.resultItem}> + <Text size="2" weight="medium" className={styles.inlineCode}> + {filename} + </Text> + {text && ( + <Box mt="1" className={styles.codeBox}> + <ShikiCodeBlock showLineNumbers={false}> + {text} + </ShikiCodeBlock> + </Box> + )} + </Box> + ); + })} + </Box> + </Box> + ); + } + } + + // Code interpreter outputs: preserve JSON, but try to show text outputs. + if (toolName === "openai_code_interpreter_call") { + const outputs = Array.isArray(obj.outputs) + ? (obj.outputs as unknown[]) + : []; + if (outputs.length > 0) { + const textOutputs: string[] = []; + for (const out of outputs) { + const oo = out as Record<string, unknown>; + if (typeof oo.text === "string") textOutputs.push(oo.text); + } + if (textOutputs.length > 0) { + return ( + <Box className={styles.codeBox}> + <ShikiCodeBlock showLineNumbers={false}> + {textOutputs.join("\n\n")} + </ShikiCodeBlock> + </Box> + ); + } + } + } + + // Computer call output can include images; keep JSON for now (images will be rendered in result content if present there) + // Audio and refusal: keep JSON; assistant UI already handles transcripts/refusal text if surfaced. 
+ + return null; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIWebSearchCallTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIWebSearchCallTool.tsx new file mode 100644 index 000000000..dcb18a9ea --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/OpenAIWebSearchCallTool.tsx @@ -0,0 +1,114 @@ +import React, { useMemo } from "react"; +import { MagnifyingGlassIcon } from "@radix-ui/react-icons"; +import { Box, Flex, Link, Text } from "@radix-ui/themes"; + +import { ToolCard } from "./ToolCard"; +import type { ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./OpenAIResponsesTool.module.css"; +import { useOpenAiResponsesToolCardState } from "./openaiResponsesToolCardState"; + +type Props = { + toolCall: ToolCall; +}; + +type WebSearchResult = { + url?: string; + title?: string; + snippet?: string; + description?: string; +}; + +function isSafeHttpUrl(url: string): boolean { + try { + const parsed = new URL(url); + return parsed.protocol === "http:" || parsed.protocol === "https:"; + } catch { + return false; + } +} + +export const OpenAIWebSearchCallTool: React.FC<Props> = ({ toolCall }) => { + const state = useOpenAiResponsesToolCardState(toolCall); + + const args = state.parsedArgs as Record<string, unknown> | null; + const query = args && typeof args.query === "string" ? args.query : undefined; + + const results = useMemo(() => { + if (!args) return [] as WebSearchResult[]; + if (!Array.isArray(args.results)) return [] as WebSearchResult[]; + return (args.results as unknown[]) + .map((r) => (typeof r === "object" && r ? (r as WebSearchResult) : {})) + .slice(0, 50); + }, [args]); + + const summary = query ? ( + <> + Web Search: <span className={styles.inlineCode}>{query}</span> + </> + ) : ( + state.label + ); + + return ( + <ToolCard + icon={<MagnifyingGlassIcon />} + summary={summary} + status={state.status} + isOpen={state.isOpen} + onToggle={state.toggleOpen} + toolCall={toolCall} + > + {results.length > 0 && ( + <Box> + <Text size="1" color="gray"> + Results ({results.length}) + </Text> + <Box className={styles.resultList}> + {results.map((r, idx) => { + const title = r.title ?? "(no title)"; + const url = r.url ?? ""; + const safeUrl = url && isSafeHttpUrl(url) ? url : ""; + const snippet = r.snippet ?? r.description ?? ""; + return ( + <Box key={idx} className={styles.resultItem}> + <Flex direction="column" gap="1"> + {safeUrl ? 
( + <Link + href={safeUrl} + target="_blank" + rel="noopener noreferrer" + size="2" + > + {title} + </Link> + ) : ( + <Text size="2" weight="medium"> + {title} + </Text> + )} + {safeUrl && ( + <Text size="1" color="gray" className={styles.inlineCode}> + {safeUrl} + </Text> + )} + {snippet && ( + <Text size="1" color="gray"> + {snippet} + </Text> + )} + </Flex> + </Box> + ); + })} + </Box> + </Box> + )} + + <Text size="1" color="gray"> + Raw JSON + </Text> + <ShikiCodeBlock showLineNumbers={false}>{state.rawJson}</ShikiCodeBlock> + </ToolCard> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/PlanningTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/PlanningTool.tsx new file mode 100644 index 000000000..6b1786814 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/PlanningTool.tsx @@ -0,0 +1,20 @@ +import React from "react"; +import { TargetIcon } from "@radix-ui/react-icons"; +import { ToolCall } from "../../../services/refact/types"; +import { StreamingToolCard } from "./StreamingToolCard"; + +interface PlanningToolProps { + toolCall: ToolCall; +} + +export const PlanningTool: React.FC<PlanningToolProps> = ({ toolCall }) => { + return ( + <StreamingToolCard + toolCall={toolCall} + icon={<TargetIcon />} + summary="Plan solution" + /> + ); +}; + +export default PlanningTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ReadTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/ReadTool.module.css new file mode 100644 index 000000000..aa09f501a --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ReadTool.module.css @@ -0,0 +1,24 @@ +.filename { + font-family: var(--code-font-family); + font-size: inherit; + color: var(--accent-11); + cursor: pointer; + vertical-align: baseline; +} + +.args { + color: var(--gray-11); + font-family: var(--code-font-family); + font-size: inherit; + vertical-align: baseline; +} + +.filename:hover { + text-decoration: underline; +} + +.resultContent { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ReadTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/ReadTool.tsx new file mode 100644 index 000000000..ad5b6c2c6 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ReadTool.tsx @@ -0,0 +1,135 @@ +import React, { useMemo, useCallback } from "react"; +import { FileTextIcon } from "@radix-ui/react-icons"; +import { Box } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { ContextFileList } from "./ContextFileList"; +import { useAppSelector, useEventsBusForIDE } from "../../../hooks"; +import { selectToolResultById } from "../../../features/Chat/Thread/selectors"; +import { ChatContextFile, ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./ReadTool.module.css"; + +interface ReadToolArgs { + paths?: string; +} + +function basename(path: string): string { + const parts = path.split("/"); + return parts[parts.length - 1] || path; +} + +interface ReadToolProps { + toolCall: ToolCall; + contextFiles?: ChatContextFile[]; +} + +export const ReadTool: React.FC<ReadToolProps> = ({ + toolCall, + contextFiles, +}) => { + const storeKey = toolCall.id ? 
`tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + const { queryPathThenOpenFile } = useEventsBusForIDE(); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const args = useMemo<ReadToolArgs>(() => { + try { + return JSON.parse(toolCall.function.arguments) as ReadToolArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const paths = useMemo(() => { + return ( + args.paths + ?.split(",") + .map((p) => p.trim()) + .filter(Boolean) ?? [] + ); + }, [args.paths]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult]); + + const handleFileClick = useCallback( + (e: React.MouseEvent, filePath: string) => { + e.stopPropagation(); + void queryPathThenOpenFile({ file_path: filePath }); + }, + [queryPathThenOpenFile], + ); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? maybeResult.content + : null; + + const summary = useMemo(() => { + if (paths.length === 0) return "Read file"; + if (paths.length === 1) { + return ( + <> + Read{" "} + <span + className={styles.filename} + onClick={(e) => handleFileClick(e, paths[0])} + > + {basename(paths[0])} + </span> + </> + ); + } + return ( + <> + Read{" "} + {paths.map((p, i) => ( + <React.Fragment key={p}> + {i > 0 && ", "} + <span + className={styles.filename} + onClick={(e) => handleFileClick(e, p)} + > + {basename(p)} + </span> + </React.Fragment> + ))} + </> + ); + }, [paths, handleFileClick]); + + return ( + <ToolCard + icon={<FileTextIcon />} + summary={summary} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {content && ( + <Box className={styles.resultContent}> + <ShikiCodeBlock showLineNumbers={false}>{content}</ShikiCodeBlock> + </Box> + )} + {contextFiles && contextFiles.length > 0 && ( + <ContextFileList files={contextFiles} /> + )} + </ToolCard> + ); +}; + +export default ReadTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ResearchTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/ResearchTool.tsx new file mode 100644 index 000000000..74cbf2163 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ResearchTool.tsx @@ -0,0 +1,34 @@ +import React, { useMemo } from "react"; +import { ReaderIcon } from "@radix-ui/react-icons"; +import { ToolCall } from "../../../services/refact/types"; +import { StreamingToolCard } from "./StreamingToolCard"; +interface ResearchArgs { + research_query?: string; +} + +interface ResearchToolProps { + toolCall: ToolCall; +} + +export const ResearchTool: React.FC<ResearchToolProps> = ({ toolCall }) => { + const args = useMemo<ResearchArgs>(() => { + try { + return JSON.parse(toolCall.function.arguments) as ResearchArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const query = args.research_query ?? ""; + const summary = query ? 
`Research "${query}"` : "Research"; + + return ( + <StreamingToolCard + toolCall={toolCall} + icon={<ReaderIcon />} + summary={summary} + /> + ); +}; + +export default ResearchTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/SearchTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/SearchTool.module.css new file mode 100644 index 000000000..d63c08e2d --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/SearchTool.module.css @@ -0,0 +1,18 @@ +.query { + font-family: var(--code-font-family); + font-size: 0.95em; + color: var(--accent-11); + vertical-align: baseline; +} + +.count { + color: var(--gray-10); + vertical-align: baseline; + white-space: nowrap; +} + +.resultContent { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/SearchTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/SearchTool.tsx new file mode 100644 index 000000000..7582f5da1 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/SearchTool.tsx @@ -0,0 +1,164 @@ +import React, { useMemo } from "react"; +import { MagnifyingGlassIcon } from "@radix-ui/react-icons"; +import { Box } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { ContextFileList } from "./ContextFileList"; +import { useAppSelector } from "../../../hooks"; +import { selectToolResultById } from "../../../features/Chat/Thread/selectors"; +import { ChatContextFile, ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./SearchTool.module.css"; + +type SearchToolType = + | "search_pattern" + | "search_semantic" + | "search_symbol_definition"; + +interface SearchPatternArgs { + pattern?: string; + scope?: string; +} + +interface SearchSemanticArgs { + queries?: string; + scope?: string; +} + +interface SearchSymbolArgs { + symbols?: string; +} + +interface SearchToolProps { + toolCall: ToolCall; + toolType: SearchToolType; + contextFiles?: ChatContextFile[]; +} + +function countMatches(content: string): number | null { + const lines = content.split("\n").filter((l) => l.trim()); + if (lines.length === 0) return null; + return lines.length; +} + +export const SearchTool: React.FC<SearchToolProps> = ({ + toolCall, + toolType, + contextFiles, +}) => { + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const args = useMemo((): + | SearchPatternArgs + | SearchSemanticArgs + | SearchSymbolArgs => { + try { + return JSON.parse(toolCall.function.arguments) as + | SearchPatternArgs + | SearchSemanticArgs + | SearchSymbolArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult]); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? maybeResult.content + : null; + + // Don't show match count on error - error messages also have content + const matchCount = + content && status !== "error" ? 
countMatches(content) : null; + + const summary = useMemo(() => { + switch (toolType) { + case "search_pattern": { + const patternArgs = args as SearchPatternArgs; + const pattern = patternArgs.pattern ?? "pattern"; + return ( + <> + Search <span className={styles.query}>{pattern}</span> + {matchCount !== null && ( + <span className={styles.count}> → {matchCount} matches</span> + )} + </> + ); + } + case "search_semantic": { + const semanticArgs = args as SearchSemanticArgs; + const query = semanticArgs.queries ?? "query"; + return ( + <> + Search <span className={styles.query}>&quot;{query}&quot;</span> + {matchCount !== null && ( + <span className={styles.count}> → {matchCount} results</span> + )} + </> + ); + } + case "search_symbol_definition": { + const symbolArgs = args as SearchSymbolArgs; + const symbols = symbolArgs.symbols ?? "symbol"; + return ( + <> + Find <span className={styles.query}>{symbols}</span> + {matchCount !== null && ( + <span className={styles.count}> → {matchCount} found</span> + )} + </> + ); + } + } + }, [toolType, args, matchCount]); + + const meta = useMemo(() => { + if (toolType === "search_pattern" || toolType === "search_semantic") { + const scopeArgs = args as SearchPatternArgs | SearchSemanticArgs; + if (scopeArgs.scope && scopeArgs.scope !== "workspace") { + return scopeArgs.scope; + } + } + return null; + }, [toolType, args]); + + return ( + <ToolCard + icon={<MagnifyingGlassIcon />} + summary={summary} + meta={meta} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {content && ( + <Box className={styles.resultContent}> + <ShikiCodeBlock showLineNumbers={false}>{content}</ShikiCodeBlock> + </Box> + )} + {contextFiles && contextFiles.length > 0 && ( + <ContextFileList files={contextFiles} /> + )} + </ToolCard> + ); +}; + +export default SearchTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ShellServiceTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/ShellServiceTool.tsx new file mode 100644 index 000000000..a42c8182e --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ShellServiceTool.tsx @@ -0,0 +1,65 @@ +import React, { useMemo } from "react"; +import { + PlayIcon, + StopIcon, + ReloadIcon, + InfoCircledIcon, + FileTextIcon, +} from "@radix-ui/react-icons"; +import { ToolCall } from "../../../services/refact/types"; +import { StreamingToolCard } from "./StreamingToolCard"; + +interface ShellServiceArgs { + service_name?: string; + action?: string; + command?: string; + workdir?: string; +} + +const ACTION_ICONS: Record<string, React.ReactNode> = { + start: <PlayIcon />, + stop: <StopIcon />, + restart: <ReloadIcon />, + status: <InfoCircledIcon />, + logs: <FileTextIcon />, +}; + +interface ShellServiceToolProps { + toolCall: ToolCall; +} + +export const ShellServiceTool: React.FC<ShellServiceToolProps> = ({ + toolCall, +}) => { + const args = useMemo<ShellServiceArgs>(() => { + try { + return JSON.parse(toolCall.function.arguments) as ShellServiceArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const action = args.action ?? "manage"; + const serviceName = args.service_name ?? "service"; + const icon = ACTION_ICONS[action] ?? <PlayIcon />; + + const actionLabel = action.charAt(0).toUpperCase() + action.slice(1); + const summary = `${actionLabel} ${serviceName}`; + + const meta = args.command + ? args.command + : args.workdir + ? 
`in ${args.workdir}` + : null; + + return ( + <StreamingToolCard + toolCall={toolCall} + icon={icon} + summary={summary} + meta={meta} + /> + ); +}; + +export default ShellServiceTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ShellTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/ShellTool.tsx new file mode 100644 index 000000000..ea8903ba3 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ShellTool.tsx @@ -0,0 +1,38 @@ +import React, { useMemo } from "react"; +import { CodeIcon } from "@radix-ui/react-icons"; +import { ToolCall } from "../../../services/refact/types"; +import { StreamingToolCard } from "./StreamingToolCard"; + +interface ShellArgs { + command?: string; + workdir?: string; +} + +interface ShellToolProps { + toolCall: ToolCall; +} + +export const ShellTool: React.FC<ShellToolProps> = ({ toolCall }) => { + const args = useMemo<ShellArgs>(() => { + try { + return JSON.parse(toolCall.function.arguments) as ShellArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const command = args.command ?? toolCall.function.arguments; + const summary = `Run ${command}`; + const meta = args.workdir ? `in ${args.workdir}` : null; + + return ( + <StreamingToolCard + toolCall={toolCall} + icon={<CodeIcon />} + summary={summary} + meta={meta} + /> + ); +}; + +export default ShellTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/StreamingToolCard.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/StreamingToolCard.module.css new file mode 100644 index 000000000..230cb435f --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/StreamingToolCard.module.css @@ -0,0 +1,136 @@ +.card { + overflow: hidden; +} + +.header { + cursor: pointer; + padding: var(--space-1) 0; + transition: filter 0.15s ease; +} + +.header:hover { + filter: brightness(1.3); +} + +.icon { + display: flex; + align-items: center; + justify-content: center; + width: 20px; + min-width: 20px; + height: 16px; + flex-shrink: 0; +} + +.summary { + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + flex: 1 1 auto; + min-width: 0; +} + +/* Running state - subtle text fade */ +@keyframes subtlePulse { + 0%, + 100% { + opacity: 0.9; + } + 50% { + opacity: 0.6; + } +} + +.running { + color: var(--gray-11); + animation: subtlePulse 2s ease-in-out infinite; +} + +.meta { + flex: 0 10 auto; + min-width: 0; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.entertainmentRow { + padding-left: 28px; + padding-top: 2px; + padding-bottom: 2px; +} + +.entertainmentText { + color: var(--gray-10); + font-size: 12px; + white-space: normal; +} + +.entertainmentContent { + padding-left: 28px; + padding-top: 2px; + padding-bottom: 2px; + max-height: 260px; + overflow-y: auto; + composes: scrollbarThin from "../../shared/scrollbar.module.css"; +} + +/* Keep progress typography stable; let Markdown do structure. */ +.entertainmentMarkdown { + color: var(--gray-10); + font-size: 12px; + line-height: 1.5; +} + +/* Avoid extra vertical rhythm inside the fixed-height scroller. 
*/ +.entertainmentMarkdown :global(p) { + margin: 0; + white-space: pre-wrap; +} + +.entertainmentMarkdown :global(p + p), +.entertainmentMarkdown :global(p + pre), +.entertainmentMarkdown :global(pre + p) { + margin-top: var(--space-1); +} + +.error .summary { + color: var(--red-11); +} + +.errorBadge { + flex-shrink: 0; +} + +.contentWrapper { + overflow: hidden; + /* Force GPU layer for JCEF compatibility */ + transform: translateZ(0); + /* CSS-based collapse animation using grid */ + display: grid; + grid-template-rows: 0fr; + opacity: 0; + transition: + grid-template-rows 0.2s ease-out, + opacity 0.15s ease-out; +} + +.contentWrapperOpen { + grid-template-rows: 1fr; + opacity: 1; +} + +.noTransition { + transition: none !important; +} + +.contentInner { + overflow: hidden; +} + +.content { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + padding: var(--space-2) 0 var(--space-1); + max-height: 400px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/StreamingToolCard.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/StreamingToolCard.tsx new file mode 100644 index 000000000..a4094d6c7 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/StreamingToolCard.tsx @@ -0,0 +1,181 @@ +import React, { useCallback, useEffect, useMemo, useRef } from "react"; +import { Flex, Text, Box, Spinner } from "@radix-ui/themes"; +import classNames from "classnames"; +import { useAutoExpandCollapse, ToolStatus } from "./useAutoExpandCollapse"; +import { useAppSelector } from "../../../hooks"; +import { selectToolResultById } from "../../../features/Chat/Thread/selectors"; +import { ToolCall } from "../../../services/refact/types"; +import { Markdown, ShikiCodeBlock } from "../../Markdown"; +import { useDelayedUnmount } from "../../shared/useDelayedUnmount"; +import { ToolCallTooltip } from "./ToolCallTooltip"; +import styles from "./StreamingToolCard.module.css"; + +const MAX_MD_RENDER_CHARS = 50_000; + +function looksLikeMarkdown(text: string): boolean { + if (text.includes("```")) return true; + if (/\[[^\]]+\]\([^)]+\)/.test(text)) return true; + if (/^#{1,6}\s+\S/m.test(text)) return true; + if (/^\s*([-*+])\s+\S/m.test(text)) return true; + if (/^\s*\d+\.\s+\S/m.test(text)) return true; + const hasTableHeader = /^\s*\|.+\|\s*$/m.test(text); + const hasTableSep = /^\s*\|[\s:|-]+\|\s*$/m.test(text); + if (hasTableHeader && hasTableSep) return true; + return false; +} + +interface StreamingToolCardProps { + toolCall: ToolCall; + icon: React.ReactNode; + summary: React.ReactNode; + meta?: string | null; +} + +export const StreamingToolCard: React.FC<StreamingToolCardProps> = ({ + toolCall, + icon, + summary, + meta, +}) => { + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if (maybeResult.tool_failed) return "error"; + return "success"; + }, [maybeResult]); + + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const { isOpen, onToggle, animate } = useAutoExpandCollapse({ + status, + storeKey, + }); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? 
maybeResult.content + : null; + + const shouldRenderMarkdown = + content && + content.length <= MAX_MD_RENDER_CHARS && + looksLikeMarkdown(content); + + const { shouldRender, isAnimatingOpen } = useDelayedUnmount( + isOpen && !!content, + 200, + animate, + ); + + const entertainmentMessage = useMemo(() => { + if (status !== "running") return null; + const log = toolCall.subchat_log; + if (!log || log.length === 0) return null; + const last = log[log.length - 1]; + const stepMatch = last.match(/^(\d+\/\d+):\s*([\s\S]+)$/); + if (stepMatch) { + return { step: stepMatch[1], text: stepMatch[2].trim() }; + } + return { step: null, text: last }; + }, [status, toolCall.subchat_log]); + + const entertainmentRef = useRef<HTMLDivElement | null>(null); + const userScrolledRef = useRef(false); + + const handleEntertainmentScroll = useCallback(() => { + const el = entertainmentRef.current; + if (!el) return; + const isAtBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 20; + userScrolledRef.current = !isAtBottom; + }, []); + + useEffect(() => { + if (status !== "running") return; + const el = entertainmentRef.current; + if (!el) return; + if (userScrolledRef.current) return; + if (el.scrollTop + el.clientHeight + 20 < el.scrollHeight) { + el.scrollTop = el.scrollHeight; + } + }, [status, entertainmentMessage?.text]); + + const header = ( + <Flex + className={classNames(styles.header, status === "error" && styles.error)} + align="center" + gap="2" + onClick={onToggle} + > + <span className={styles.icon}> + {status === "running" ? <Spinner size="1" /> : icon} + </span> + <Text + size="1" + className={classNames( + styles.summary, + status === "running" && styles.running, + )} + > + {summary} + </Text> + {meta && ( + <Text size="1" color="gray" className={styles.meta}> + {meta} + </Text> + )} + {status === "error" && ( + <Text size="1" color="red" className={styles.errorBadge}> + failed + </Text> + )} + </Flex> + ); + + return ( + <div className={styles.card}> + <ToolCallTooltip toolCall={toolCall}>{header}</ToolCallTooltip> + + {entertainmentMessage && ( + <div + className={styles.entertainmentContent} + ref={entertainmentRef} + onScroll={handleEntertainmentScroll} + > + <div className={styles.entertainmentMarkdown}> + <Markdown canHaveInteractiveElements={false}> + {entertainmentMessage.text} + </Markdown> + </div> + </div> + )} + + {shouldRender && content && ( + <div + className={classNames( + styles.contentWrapper, + isAnimatingOpen && styles.contentWrapperOpen, + !animate && styles.noTransition, + )} + > + <div className={styles.contentInner}> + <Box className={styles.content}> + {shouldRenderMarkdown ? 
( + <Text size="2"> + <Markdown>{content}</Markdown> + </Text> + ) : ( + <ShikiCodeBlock showLineNumbers={false}> + {content} + </ShikiCodeBlock> + )} + </Box> + </div> + </div> + )} + </div> + ); +}; + +export default StreamingToolCard; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/SubagentTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/SubagentTool.tsx new file mode 100644 index 000000000..fe3ee229f --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/SubagentTool.tsx @@ -0,0 +1,46 @@ +import React, { useMemo } from "react"; +import { PersonIcon } from "@radix-ui/react-icons"; +import { ToolCall } from "../../../services/refact/types"; +import { StreamingToolCard } from "./StreamingToolCard"; + +interface SubagentArgs { + task?: string; + expected_result?: string; + tools?: string; + max_steps?: string; +} + +interface SubagentToolProps { + toolCall: ToolCall; +} + +export const SubagentTool: React.FC<SubagentToolProps> = ({ toolCall }) => { + const args = useMemo<SubagentArgs>(() => { + try { + return JSON.parse(toolCall.function.arguments) as SubagentArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const summary = `Analyze "${args.task ?? "task"}"`; + + const meta = + [ + args.tools && `tools: ${args.tools}`, + args.max_steps && `max: ${args.max_steps}`, + ] + .filter(Boolean) + .join(" · ") || null; + + return ( + <StreamingToolCard + toolCall={toolCall} + icon={<PersonIcon />} + summary={summary} + meta={meta} + /> + ); +}; + +export default SubagentTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/TaskDoneTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/TaskDoneTool.module.css new file mode 100644 index 000000000..861dac739 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/TaskDoneTool.module.css @@ -0,0 +1,22 @@ +.taskDoneCard { + border-left: 2px solid var(--green-9); + padding-left: var(--space-2); +} + +.successText { + color: var(--green-11); +} + +.content { + padding: var(--space-2) 0; +} + +.fileLink { + color: var(--accent-11); + cursor: pointer; + text-decoration: underline; +} + +.fileLink:hover { + color: var(--accent-12); +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/TaskDoneTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/TaskDoneTool.tsx new file mode 100644 index 000000000..3928a33a0 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/TaskDoneTool.tsx @@ -0,0 +1,103 @@ +import React, { useMemo, useCallback } from "react"; +import { CheckCircledIcon } from "@radix-ui/react-icons"; +import { Box, Flex, Text } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { useAppSelector, useEventsBusForIDE } from "../../../hooks"; +import { selectToolResultById } from "../../../features/Chat/Thread/selectors"; +import { ToolCall } from "../../../services/refact/types"; +import { Markdown } from "../../Markdown"; +import styles from "./TaskDoneTool.module.css"; +import { basename } from "./utils"; + +interface TaskDoneResult { + type: "task_done"; + summary: string; + report: string; + files_changed?: string[]; + knowledge_path?: string; +} + +interface TaskDoneToolProps { + toolCall: ToolCall; +} + +export const TaskDoneTool: React.FC<TaskDoneToolProps> = ({ toolCall }) => { + const { queryPathThenOpenFile } = useEventsBusForIDE(); + + const maybeResult = useAppSelector((state) => + 
selectToolResultById(state, toolCall.id), + ); + + const handleFileClick = useCallback( + (e: React.MouseEvent, filePath: string) => { + e.stopPropagation(); + void queryPathThenOpenFile({ file_path: filePath }); + }, + [queryPathThenOpenFile], + ); + + const data = useMemo((): TaskDoneResult | null => { + if (!maybeResult || typeof maybeResult.content !== "string") return null; + try { + return JSON.parse(maybeResult.content) as TaskDoneResult; + } catch { + return null; + } + }, [maybeResult]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if (maybeResult.tool_failed) return "error"; + return "success"; + }, [maybeResult]); + + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey, true); + + const summary = data?.summary ?? "Task completed"; + + return ( + <ToolCard + icon={<CheckCircledIcon />} + summary={<Text className={styles.successText}>✅ {summary}</Text>} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + className={styles.taskDoneCard} + toolCall={toolCall} + > + {data && ( + <Box className={styles.content}> + <Markdown>{data.report}</Markdown> + + {data.files_changed && data.files_changed.length > 0 && ( + <Flex gap="2" wrap="wrap" mt="3" align="center"> + <Text size="1" color="gray"> + Files: + </Text> + {data.files_changed.map((f) => ( + <Text + key={f} + size="1" + className={styles.fileLink} + onClick={(e) => handleFileClick(e, f)} + > + {basename(f)} + </Text> + ))} + </Flex> + )} + + {data.knowledge_path && ( + <Text size="1" color="gray" mt="2" as="p"> + 💾 Saved to knowledge + </Text> + )} + </Box> + )} + </ToolCard> + ); +}; + +export default TaskDoneTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/TasksTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/TasksTool.module.css new file mode 100644 index 000000000..2889cfa7b --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/TasksTool.module.css @@ -0,0 +1,47 @@ +.stats { + color: var(--gray-11); + font-family: var(--code-font-family); +} + +.taskList { + display: flex; + flex-direction: column; + gap: var(--space-1); + padding: var(--space-1) 0; +} + +.taskItem { + padding: var(--space-1) var(--space-2); +} + +.taskItem svg { + width: 14px; + height: 14px; + flex-shrink: 0; +} + +.pending { + color: var(--gray-11); +} + +.inProgress { + color: var(--accent-11); + animation: spin 1s linear infinite; +} + +.completed { + color: var(--green-11); +} + +.failed { + color: var(--red-11); +} + +@keyframes spin { + from { + transform: rotate(0deg); + } + to { + transform: rotate(360deg); + } +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/TasksTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/TasksTool.tsx new file mode 100644 index 000000000..42088d70b --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/TasksTool.tsx @@ -0,0 +1,121 @@ +import React, { useMemo } from "react"; +import { + CheckCircledIcon, + CircleIcon, + CrossCircledIcon, + UpdateIcon, +} from "@radix-ui/react-icons"; +import { Flex, Text, Box } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { useAppSelector } from "../../../hooks"; +import { selectToolResultById } from "../../../features/Chat/Thread/selectors"; +import { ToolCall } from "../../../services/refact/types"; +import styles from "./TasksTool.module.css"; + +interface Task { + 
id: string; + content: string; + status: "pending" | "in_progress" | "completed" | "failed"; +} + +interface TasksSetArgs { + tasks?: Task[]; +} + +interface TasksToolProps { + toolCall: ToolCall; +} + +const TaskStatusIcon: React.FC<{ status: Task["status"] }> = ({ status }) => { + switch (status) { + case "completed": + return <CheckCircledIcon className={styles.completed} />; + case "failed": + return <CrossCircledIcon className={styles.failed} />; + case "in_progress": + return <UpdateIcon className={styles.inProgress} />; + default: + return <CircleIcon className={styles.pending} />; + } +}; + +const TaskItem: React.FC<{ task: Task }> = ({ task }) => { + return ( + <Flex align="center" gap="2" className={styles.taskItem}> + <TaskStatusIcon status={task.status} /> + <Text size="1" className={styles[task.status]}> + {task.content} + </Text> + </Flex> + ); +}; + +export const TasksTool: React.FC<TasksToolProps> = ({ toolCall }) => { + const storeKey = toolCall.id ? `tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const tasks = useMemo((): Task[] => { + try { + const args = JSON.parse(toolCall.function.arguments) as TasksSetArgs; + return args.tasks ?? []; + } catch { + return []; + } + }, [toolCall.function.arguments]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult]); + + const stats = useMemo(() => { + const completed = tasks.filter((t) => t.status === "completed").length; + const total = tasks.length; + return { completed, total }; + }, [tasks]); + + const summary = useMemo(() => { + if (tasks.length === 0) return "Update tasks"; + return ( + <> + Tasks{" "} + <span className={styles.stats}> + {stats.completed}/{stats.total} + </span> + </> + ); + }, [tasks.length, stats]); + + return ( + <ToolCard + icon={<CheckCircledIcon />} + summary={summary} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {tasks.length > 0 && ( + <Box className={styles.taskList}> + {tasks.map((task) => ( + <TaskItem key={task.id} task={task} /> + ))} + </Box> + )} + </ToolCard> + ); +}; + +export default TasksTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCallTooltip.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCallTooltip.module.css new file mode 100644 index 000000000..9dbc0ca56 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCallTooltip.module.css @@ -0,0 +1,54 @@ +.tooltipPopup { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + position: fixed; + z-index: 9999; + background: var(--color-panel-solid); + border: 1px solid var(--gray-a6); + border-radius: var(--radius-2); + padding: var(--space-3); + font-family: var(--code-font-family); + font-size: var(--font-size-1); + line-height: 1.5; + color: var(--gray-12); + max-width: 600px; + min-width: 200px; + max-height: 400px; + overflow-y: auto; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3); +} + +.toolName { + font-weight: 600; + color: var(--accent-11); + white-space: nowrap; +} + +.separator { + height: 1px; + background: var(--gray-a5); + margin: var(--space-2) 0; +} + +.args { + display: flex; + flex-direction: column; + gap: var(--space-1); +} + +.argRow { + display: 
flex; + flex-direction: column; + gap: 2px; +} + +.argKey { + color: var(--gray-a11); + font-size: var(--font-size-1); + flex-shrink: 0; +} + +.argValue { + color: var(--gray-12); + white-space: pre-wrap; + word-break: break-word; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCallTooltip.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCallTooltip.tsx new file mode 100644 index 000000000..63ba2d0c4 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCallTooltip.tsx @@ -0,0 +1,140 @@ +import React, { + useMemo, + useState, + useRef, + useCallback, + useEffect, +} from "react"; +import { ToolCall } from "../../../services/refact/types"; +import { Portal } from "../../Portal"; +import styles from "./ToolCallTooltip.module.css"; + +const DELAY_MS = 10000; + +function parseArgs(toolCall: ToolCall): [string, string][] { + try { + const parsed = JSON.parse(toolCall.function.arguments) as Record< + string, + unknown + >; + return Object.entries(parsed).map(([k, v]) => [ + k, + typeof v === "string" ? v : JSON.stringify(v, null, 2), + ]); + } catch { + if (toolCall.function.arguments) { + return [["(raw)", toolCall.function.arguments]]; + } + return []; + } +} + +interface ToolCallTooltipProps { + toolCall: ToolCall; + children: React.ReactNode; +} + +export const ToolCallTooltip: React.FC<ToolCallTooltipProps> = ({ + toolCall, + children, +}) => { + const [visible, setVisible] = useState(false); + const [pos, setPos] = useState({ x: 0, y: 0 }); + const openTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null); + const closeTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null); + const wrapperRef = useRef<HTMLDivElement>(null); + + const toolName = toolCall.function.name ?? "unknown"; + const entries = useMemo(() => parseArgs(toolCall), [toolCall]); + + const clearOpenTimer = useCallback(() => { + if (openTimerRef.current) { + clearTimeout(openTimerRef.current); + openTimerRef.current = null; + } + }, []); + + const clearCloseTimer = useCallback(() => { + if (closeTimerRef.current) { + clearTimeout(closeTimerRef.current); + closeTimerRef.current = null; + } + }, []); + + const scheduleClose = useCallback(() => { + clearCloseTimer(); + closeTimerRef.current = setTimeout(() => { + setVisible(false); + }, 100); + }, [clearCloseTimer]); + + const cancelClose = useCallback(() => { + clearCloseTimer(); + }, [clearCloseTimer]); + + const handleWrapperEnter = useCallback(() => { + cancelClose(); + openTimerRef.current = setTimeout(() => { + if (wrapperRef.current) { + const rect = wrapperRef.current.getBoundingClientRect(); + setPos({ x: rect.left, y: rect.top - 8 }); + } + setVisible(true); + }, DELAY_MS); + }, [cancelClose]); + + const handleWrapperLeave = useCallback(() => { + clearOpenTimer(); + scheduleClose(); + }, [clearOpenTimer, scheduleClose]); + + const handlePopupEnter = useCallback(() => { + cancelClose(); + }, [cancelClose]); + + const handlePopupLeave = useCallback(() => { + scheduleClose(); + }, [scheduleClose]); + + useEffect(() => { + return () => { + clearOpenTimer(); + clearCloseTimer(); + }; + }, [clearOpenTimer, clearCloseTimer]); + + return ( + <div + ref={wrapperRef} + onMouseEnter={handleWrapperEnter} + onMouseLeave={handleWrapperLeave} + > + {children} + {visible && ( + <Portal> + <div + className={styles.tooltipPopup} + style={{ left: pos.x, top: pos.y, transform: "translateY(-100%)" }} + onMouseEnter={handlePopupEnter} + onMouseLeave={handlePopupLeave} + > + <div 
className={styles.toolName}>{toolName}</div> + {entries.length > 0 && ( + <> + <div className={styles.separator} /> + <div className={styles.args}> + {entries.map(([key, value]) => ( + <div key={key} className={styles.argRow}> + <span className={styles.argKey}>{key}</span> + <span className={styles.argValue}>{value}</span> + </div> + ))} + </div> + </> + )} + </div> + </Portal> + )} + </div> + ); +}; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCard.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCard.module.css new file mode 100644 index 000000000..3311728a5 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCard.module.css @@ -0,0 +1,118 @@ +.card { + overflow: hidden; +} + +.header { + padding: var(--space-1) 0; + cursor: pointer; + user-select: none; + min-height: 24px; + transition: filter 0.15s ease; +} + +.header:hover { + filter: brightness(1.3); +} + +.iconWrapper { + display: flex; + align-items: center; + justify-content: center; + width: 20px; + min-width: 20px; + height: 16px; + flex-shrink: 0; +} + +.iconWrapper svg { + width: 14px; + height: 14px; +} + +.summary { + color: inherit; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + flex: 1 1 auto; + min-width: 0; +} + +.meta { + flex: 0 10 auto; + min-width: 0; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + text-align: right; +} + +.contentWrapper { + overflow: hidden; + /* Force GPU layer for JCEF compatibility */ + transform: translateZ(0); + /* CSS-based collapse animation using grid */ + display: grid; + grid-template-rows: 0fr; + opacity: 0; + transition: + grid-template-rows 0.2s ease-out, + opacity 0.15s ease-out; +} + +.contentWrapperOpen { + grid-template-rows: 1fr; + opacity: 1; +} + +.noTransition { + transition: none !important; +} + +.contentInner { + overflow: hidden; +} + +.content { + padding: var(--space-2) 0 var(--space-1); + padding-left: var(--space-4); +} + +/* Running state - subtle text fade */ +@keyframes subtlePulse { + 0%, + 100% { + opacity: 0.9; + } + 50% { + opacity: 0.6; + } +} + +.running .summary { + color: var(--gray-11); + animation: subtlePulse 2s ease-in-out infinite; +} + +/* Completion - no animation, just ensure normal state */ +.completed { + /* Intentionally no animation - completion is implicit */ +} + +/* Error - subtle color change, no shake */ +.error .summary { + color: var(--red-11); +} + +/* Respect reduced motion preference */ +@media (prefers-reduced-motion: reduce) { + .running .summary, + .completed, + .error { + animation: none; + } + + .contentWrapper { + transition: none; + } +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCard.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCard.tsx new file mode 100644 index 000000000..3acd14c6d --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/ToolCard.tsx @@ -0,0 +1,93 @@ +import React from "react"; +import { Flex, Text, Spinner } from "@radix-ui/themes"; +import classNames from "classnames"; +import { useDelayedUnmount } from "../../shared/useDelayedUnmount"; +import { ToolCallTooltip } from "./ToolCallTooltip"; +import { ToolCall } from "../../../services/refact/types"; +import styles from "./ToolCard.module.css"; + +export type ToolStatus = "running" | "success" | "error"; + +export interface ToolCardProps { + icon: React.ReactNode; + summary: React.ReactNode; + meta?: React.ReactNode; + status: ToolStatus; + isOpen: boolean; + onToggle: () => void; + 
children?: React.ReactNode; + className?: string; + animate?: boolean; + toolCall?: ToolCall; +} + +export const ToolCard: React.FC<ToolCardProps> = ({ + icon, + summary, + meta, + status, + isOpen, + onToggle, + children, + className, + animate = true, + toolCall, +}) => { + const { shouldRender, isAnimatingOpen } = useDelayedUnmount( + isOpen, + 200, + animate, + ); + + const header = ( + <Flex className={styles.header} align="center" gap="2" onClick={onToggle}> + <span className={styles.iconWrapper}> + {status === "running" ? <Spinner size="1" /> : icon} + </span> + + <Text size="1" className={styles.summary}> + {summary} + </Text> + + {meta && ( + <Text size="1" color="gray" className={styles.meta}> + {meta} + </Text> + )} + </Flex> + ); + + return ( + <div + className={classNames( + styles.card, + status === "running" && styles.running, + status === "success" && styles.completed, + status === "error" && styles.error, + className, + )} + > + {toolCall ? ( + <ToolCallTooltip toolCall={toolCall}>{header}</ToolCallTooltip> + ) : ( + header + )} + + {shouldRender && children && ( + <div + className={classNames( + styles.contentWrapper, + isAnimatingOpen && styles.contentWrapperOpen, + !animate && styles.noTransition, + )} + > + <div className={styles.contentInner}> + <div className={styles.content}>{children}</div> + </div> + </div> + )} + </div> + ); +}; + +export default ToolCard; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/WebTool.module.css b/refact-agent/gui/src/components/ChatContent/ToolCard/WebTool.module.css new file mode 100644 index 000000000..da29329ed --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/WebTool.module.css @@ -0,0 +1,15 @@ +.url { + font-family: var(--code-font-family); + font-size: 0.95em; + color: var(--accent-11); +} + +.query { + color: var(--accent-11); +} + +.resultContent { + composes: scrollbarThin from "../../shared/scrollbar.module.css"; + max-height: 300px; + overflow-y: auto; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/WebTool.tsx b/refact-agent/gui/src/components/ChatContent/ToolCard/WebTool.tsx new file mode 100644 index 000000000..dcf70dbb3 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/WebTool.tsx @@ -0,0 +1,117 @@ +import React, { useMemo } from "react"; +import { GlobeIcon } from "@radix-ui/react-icons"; +import { Box } from "@radix-ui/themes"; +import { ToolCard, ToolStatus } from "./ToolCard"; +import { useStoredOpen } from "../useStoredOpen"; +import { ContextFileList } from "./ContextFileList"; +import { useAppSelector } from "../../../hooks"; +import { selectToolResultById } from "../../../features/Chat/Thread/selectors"; +import { ChatContextFile, ToolCall } from "../../../services/refact/types"; +import { ShikiCodeBlock } from "../../Markdown"; +import styles from "./WebTool.module.css"; + +type WebToolType = "web" | "web_search"; + +interface WebArgs { + url?: string; +} + +interface WebSearchArgs { + query?: string; +} + +interface WebToolProps { + toolCall: ToolCall; + toolType: WebToolType; + contextFiles?: ChatContextFile[]; +} + +function extractDomain(url: string): string { + try { + const parsed = new URL(url); + return parsed.hostname; + } catch { + return url; + } +} + +export const WebTool: React.FC<WebToolProps> = ({ + toolCall, + toolType, + contextFiles, +}) => { + const storeKey = toolCall.id ? 
`tc:${toolCall.id}` : undefined; + const [isOpen, handleToggle] = useStoredOpen(storeKey); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const args = useMemo((): WebArgs | WebSearchArgs => { + try { + return JSON.parse(toolCall.function.arguments) as WebArgs | WebSearchArgs; + } catch { + return {}; + } + }, [toolCall.function.arguments]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult]); + + const content = + maybeResult && typeof maybeResult.content === "string" + ? maybeResult.content + : null; + + const summary = useMemo(() => { + if (toolType === "web") { + const webArgs = args as WebArgs; + const url = webArgs.url ?? "page"; + return ( + <> + Fetch <span className={styles.url}>{extractDomain(url)}</span> + </> + ); + } + + // toolType === "web_search" + const searchArgs = args as WebSearchArgs; + const query = searchArgs.query ?? "query"; + return ( + <> + Search web <span className={styles.query}>&quot;{query}&quot;</span> + </> + ); + }, [toolType, args]); + + return ( + <ToolCard + icon={<GlobeIcon />} + summary={summary} + status={status} + isOpen={isOpen} + onToggle={handleToggle} + toolCall={toolCall} + > + {content && ( + <Box className={styles.resultContent}> + <ShikiCodeBlock showLineNumbers={false}>{content}</ShikiCodeBlock> + </Box> + )} + {contextFiles && contextFiles.length > 0 && ( + <ContextFileList files={contextFiles} /> + )} + </ToolCard> + ); +}; + +export default WebTool; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/index.ts b/refact-agent/gui/src/components/ChatContent/ToolCard/index.ts new file mode 100644 index 000000000..843653c26 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/index.ts @@ -0,0 +1,33 @@ +export { ToolCard, type ToolCardProps, type ToolStatus } from "./ToolCard"; +export { ReadTool } from "./ReadTool"; +export { ListTool } from "./ListTool"; +export { SearchTool } from "./SearchTool"; +export { WebTool } from "./WebTool"; +export { KnowledgeTool } from "./KnowledgeTool"; +export { ContextFileList } from "./ContextFileList"; +export { StreamingToolCard } from "./StreamingToolCard"; +export { ShellTool } from "./ShellTool"; +export { SubagentTool } from "./SubagentTool"; +export { PlanningTool } from "./PlanningTool"; +export { CodeReviewTool } from "./CodeReviewTool"; +export { ResearchTool } from "./ResearchTool"; +export { ShellServiceTool } from "./ShellServiceTool"; +export { EditTool } from "./EditTool"; +export { FileOpTool } from "./FileOpTool"; +export { TasksTool } from "./TasksTool"; +export { GenericTool } from "./GenericTool"; +export { OpenAIResponsesTool } from "./OpenAIResponsesTool"; +export { OpenAIWebSearchCallTool } from "./OpenAIWebSearchCallTool"; +export { OpenAIFileSearchCallTool } from "./OpenAIFileSearchCallTool"; +export { OpenAICodeInterpreterCallTool } from "./OpenAICodeInterpreterCallTool"; +export { OpenAIComputerCallTool } from "./OpenAIComputerCallTool"; +export { OpenAIComputerCallOutputTool } from "./OpenAIComputerCallOutputTool"; +export { OpenAIImageGenerationCallTool } from "./OpenAIImageGenerationCallTool"; +export { OpenAIAudioTool } from "./OpenAIAudioTool"; +export { OpenAIRefusalTool } from "./OpenAIRefusalTool"; +export { OpenAIMcpCallTool } from "./OpenAIMcpCallTool"; +export { 
OpenAIMcpListToolsTool } from "./OpenAIMcpListToolsTool"; +export { TaskDoneTool } from "./TaskDoneTool"; +export { AskQuestionsTool } from "./AskQuestionsTool"; +export { useAutoExpandCollapse } from "./useAutoExpandCollapse"; +export { truncateMiddle, basename } from "./utils"; diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/openaiResponsesToolCardState.ts b/refact-agent/gui/src/components/ChatContent/ToolCard/openaiResponsesToolCardState.ts new file mode 100644 index 000000000..66cb7facc --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/openaiResponsesToolCardState.ts @@ -0,0 +1,100 @@ +import { useCallback, useMemo, useState } from "react"; + +import { useAppSelector } from "../../../hooks"; +import { + selectIsStreaming, + selectIsWaiting, + selectToolResultById, +} from "../../../features/Chat/Thread/selectors"; +import type { ToolCall, ToolResult } from "../../../services/refact/types"; +import type { ToolStatus } from "./ToolCard"; + +function parseJsonOrNull(text: string): unknown { + try { + return JSON.parse(text) as unknown; + } catch { + return null; + } +} + +export function toolNameLabel(name: string): string { + const stripped = name.startsWith("openai_") + ? name.slice("openai_".length) + : name; + return stripped.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase()); +} + +export type OpenAiResponsesToolCardState = { + toolName: string; + label: string; + isOpen: boolean; + toggleOpen: () => void; + status: ToolStatus; + parsedArgs: unknown; + rawJson: string; + maybeResult: ToolResult | undefined; + contentText: string | null; +}; + +export function useOpenAiResponsesToolCardState( + toolCall: ToolCall, +): OpenAiResponsesToolCardState { + const [isOpen, setIsOpen] = useState(false); + const isStreaming = useAppSelector(selectIsStreaming); + const isWaiting = useAppSelector(selectIsWaiting); + + const maybeResult = useAppSelector((state) => + selectToolResultById(state, toolCall.id), + ); + + const toolName = toolCall.function.name ?? "openai"; + const label = useMemo(() => toolNameLabel(toolName), [toolName]); + + const status: ToolStatus = useMemo(() => { + if (!maybeResult && (isStreaming || isWaiting)) return "running"; + if (!maybeResult) return "running"; + if ( + typeof maybeResult === "object" && + "tool_failed" in maybeResult && + maybeResult.tool_failed + ) { + return "error"; + } + return "success"; + }, [maybeResult, isStreaming, isWaiting]); + + const toggleOpen = useCallback(() => { + setIsOpen((prev) => !prev); + }, []); + + const parsedArgs = useMemo( + () => parseJsonOrNull(toolCall.function.arguments), + [toolCall.function.arguments], + ); + + const rawJson = useMemo(() => { + if (parsedArgs == null) return toolCall.function.arguments; + try { + return JSON.stringify(parsedArgs, null, 2); + } catch { + return toolCall.function.arguments; + } + }, [parsedArgs, toolCall.function.arguments]); + + const contentText = + maybeResult && typeof maybeResult.content === "string" + ? 
maybeResult.content + : null; + + return { + toolName, + label, + isOpen, + toggleOpen, + status, + parsedArgs, + rawJson, + maybeResult, + contentText, + }; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/useAutoExpandCollapse.ts b/refact-agent/gui/src/components/ChatContent/ToolCard/useAutoExpandCollapse.ts new file mode 100644 index 000000000..aea87c203 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/useAutoExpandCollapse.ts @@ -0,0 +1,84 @@ +import { useState, useEffect, useCallback, useRef } from "react"; +import { useCollapsibleStore } from "../useStoredOpen"; + +export type ToolStatus = "running" | "success" | "error"; + +interface UseAutoExpandCollapseOptions { + status: ToolStatus; + collapseDelayMs?: number; + storeKey?: string; +} + +interface UseAutoExpandCollapseResult { + isOpen: boolean; + onToggle: () => void; + animate: boolean; +} + +export function useAutoExpandCollapse({ + status, + collapseDelayMs = 500, + storeKey, +}: UseAutoExpandCollapseOptions): UseAutoExpandCollapseResult { + const store = useCollapsibleStore(); + const initialOpen = storeKey && store ? store.get(storeKey) : undefined; + + const [isOpen, setIsOpen] = useState(initialOpen ?? status === "running"); + const [animate, setAnimate] = useState(false); + const userToggledRef = useRef(false); + const prevStatusRef = useRef(status); + const finalizedRef = useRef(status !== "running"); + const collapseTimerRef = useRef<ReturnType<typeof setTimeout> | null>(null); + + useEffect(() => { + if (storeKey && store) store.set(storeKey, isOpen); + }, [storeKey, store, isOpen]); + + useEffect(() => { + if (finalizedRef.current) { + return; + } + + if (status === "running" && prevStatusRef.current !== "running") { + if (!userToggledRef.current) { + setAnimate(false); + setIsOpen(true); + } + } + + if (status !== "running" && prevStatusRef.current === "running") { + finalizedRef.current = true; + if (userToggledRef.current) { + prevStatusRef.current = status; + return; + } + collapseTimerRef.current = setTimeout(() => { + collapseTimerRef.current = null; + setAnimate(false); + setIsOpen(false); + userToggledRef.current = false; + }, collapseDelayMs); + prevStatusRef.current = status; + return () => { + if (collapseTimerRef.current !== null) { + clearTimeout(collapseTimerRef.current); + collapseTimerRef.current = null; + } + }; + } + + prevStatusRef.current = status; + }, [status, collapseDelayMs]); + + const onToggle = useCallback(() => { + userToggledRef.current = true; + if (collapseTimerRef.current !== null) { + clearTimeout(collapseTimerRef.current); + collapseTimerRef.current = null; + } + setAnimate(true); + setIsOpen((prev) => !prev); + }, []); + + return { isOpen, onToggle, animate }; +} diff --git a/refact-agent/gui/src/components/ChatContent/ToolCard/utils.ts b/refact-agent/gui/src/components/ChatContent/ToolCard/utils.ts new file mode 100644 index 000000000..799e99d85 --- /dev/null +++ b/refact-agent/gui/src/components/ChatContent/ToolCard/utils.ts @@ -0,0 +1,13 @@ +export function truncateMiddle(str: string, maxLength: number): string { + if (str.length <= maxLength) return str; + const ellipsis = "…"; + const charsToShow = maxLength - ellipsis.length; + const frontChars = Math.ceil(charsToShow / 2); + const backChars = Math.floor(charsToShow / 2); + return str.slice(0, frontChars) + ellipsis + str.slice(-backChars); +} + +export function basename(path: string): string { + const parts = path.split("/"); + return parts[parts.length - 1] || path; +} diff --git 
a/refact-agent/gui/src/components/ChatContent/ToolsContent.tsx b/refact-agent/gui/src/components/ChatContent/ToolsContent.tsx index c68e66b19..76bd84d2e 100644 --- a/refact-agent/gui/src/components/ChatContent/ToolsContent.tsx +++ b/refact-agent/gui/src/components/ChatContent/ToolsContent.tsx @@ -1,17 +1,16 @@ -import React, { forwardRef, useCallback, useMemo, useRef } from "react"; +import React, { + forwardRef, + useCallback, + useEffect, + useMemo, + useRef, +} from "react"; import * as Collapsible from "@radix-ui/react-collapsible"; +import { Container, Flex, Text, Box, Spinner } from "@radix-ui/themes"; import { - Container, - Flex, - Text, - Box, - Spinner, - Card, - Separator, -} from "@radix-ui/themes"; -import { + ChatContextFile, + DiffChunk, isMultiModalToolResult, - // knowledgeApi, MultiModalToolResult, ToolCall, ToolResult, @@ -35,18 +34,54 @@ import { DialogImage } from "../DialogImage"; import { RootState } from "../../app/store"; import { selectFeatures } from "../../features/Config/configSlice"; import { isRawTextDocToolCall } from "../Tools/types"; -import { TextDocTool } from "../Tools/Textdoc"; -import { MarkdownCodeBlock } from "../Markdown/CodeBlock"; +import { useCollapsibleStore } from "./useStoredOpen"; +import { ShikiCodeBlock } from "../Markdown/ShikiCodeBlock"; import { Markdown } from "../Markdown"; import classNames from "classnames"; -import resultStyle from "react-syntax-highlighter/dist/esm/styles/hljs/arta"; -import { FadedButton } from "../Buttons"; import { AnimatedText } from "../Text"; +import { + ReadTool, + ListTool, + SearchTool, + WebTool, + KnowledgeTool, + ShellTool as NewShellTool, + SubagentTool as NewSubagentTool, + PlanningTool, + CodeReviewTool as NewCodeReviewTool, + ResearchTool, + ShellServiceTool as NewShellServiceTool, + EditTool, + FileOpTool, + TasksTool, + GenericTool, + TaskDoneTool, + AskQuestionsTool, + OpenAIResponsesTool, + OpenAIWebSearchCallTool, + OpenAIFileSearchCallTool, + OpenAICodeInterpreterCallTool, + OpenAIComputerCallTool, + OpenAIComputerCallOutputTool, + OpenAIImageGenerationCallTool, + OpenAIAudioTool, + OpenAIRefusalTool, + OpenAIMcpCallTool, + OpenAIMcpListToolsTool, +} from "./ToolCard"; + +function parseProgressEntry(entry: string): { step?: string; text: string } { + const m = entry.match(/^(\d+\/\d+):\s*([\s\S]+)$/); + if (!m) return { text: entry }; + const [, step, text] = m; + return { step, text }; +} type ResultProps = { children: string; isInsideScrollArea?: boolean; onClose?: () => void; + storeKey?: string; }; function looksLikeMarkdown(text: string): boolean { @@ -67,14 +102,19 @@ function looksLikeMarkdown(text: string): boolean { const MAX_MD_RENDER_CHARS = 50_000; -const Result: React.FC<ResultProps> = ({ children, onClose }) => { +const Result: React.FC<ResultProps> = ({ children, onClose, storeKey }) => { const lines = children.split("\n"); const shouldRenderMarkdown = children.length <= MAX_MD_RENDER_CHARS && looksLikeMarkdown(children); return ( - <Reveal defaultOpen={lines.length < 9} isRevealingCode onClose={onClose}> + <Reveal + defaultOpen={lines.length < 9} + isRevealingCode + onClose={onClose} + storeKey={storeKey} + > {shouldRenderMarkdown ? 
( <Text size="2"> <Box @@ -83,16 +123,13 @@ const Result: React.FC<ResultProps> = ({ children, onClose }) => { styles.tool_result_markdown, )} > - <Markdown style={resultStyle}>{children}</Markdown> + <Markdown>{children}</Markdown> </Box> </Text> ) : ( - <MarkdownCodeBlock - className={classNames(styles.tool_result)} - style={resultStyle} - > + <ShikiCodeBlock className={classNames(styles.tool_result)}> {children} - </MarkdownCodeBlock> + </ShikiCodeBlock> )} </Reveal> ); @@ -143,7 +180,11 @@ const ToolMessage: React.FC<{ </Box> </ScrollArea> {maybeResult?.content && ( - <Result isInsideScrollArea onClose={onClose}> + <Result + isInsideScrollArea + onClose={onClose} + storeKey={toolCall.id ? `rv:${toolCall.id}` : undefined} + > {maybeResult.content} </Result> )} @@ -167,23 +208,44 @@ const ToolUsageDisplay: React.FC<{ export const SingleModelToolContent: React.FC<{ toolCalls: ToolCall[]; }> = ({ toolCalls }) => { - const [open, setOpen] = React.useState(false); const ref = useRef<HTMLDivElement>(null); const handleHide = useHideScroll(ref); const isStreaming = useAppSelector(selectIsStreaming); const isWaiting = useAppSelector(selectIsWaiting); + const store = useCollapsibleStore(); const toolCallsId = useMemo(() => { - const ids = toolCalls.reduce<string[]>((acc, toolCall) => { + return toolCalls.reduce<string[]>((acc, toolCall) => { if (typeof toolCall.id === "string") return [...acc, toolCall.id]; return acc; }, []); - - return ids; }, [toolCalls]); - const results = useAppSelector(selectManyToolResultsByIds(toolCallsId)); - const diffs = useAppSelector(selectManyDiffMessageByIds(toolCallsId)); + const toolCallsIdKey = toolCallsId.join("|"); + const storeKey = toolCallsId[0] ? `tg:${toolCallsId[0]}` : undefined; + const [open, setOpen] = React.useState(() => { + if (storeKey && store) { + const stored = store.get(storeKey); + if (stored !== undefined) return stored; + } + return false; + }); + + useEffect(() => { + if (storeKey && store) store.set(storeKey, open); + }, [storeKey, store, open]); + const selectResults = useMemo( + () => selectManyToolResultsByIds(toolCallsId), + // eslint-disable-next-line react-hooks/exhaustive-deps + [toolCallsIdKey], + ); + const selectDiffs = useMemo( + () => selectManyDiffMessageByIds(toolCallsId), + // eslint-disable-next-line react-hooks/exhaustive-deps + [toolCallsIdKey], + ); + const results = useAppSelector(selectResults); + const diffs = useAppSelector(selectDiffs); const allResolved = useMemo(() => { return results.length + diffs.length === toolCallsId.length; }, [diffs.length, results.length, toolCallsId.length]); @@ -225,15 +287,12 @@ export const SingleModelToolContent: React.FC<{ }; }); - const subchat: string | undefined = toolCalls - .map((toolCall) => toolCall.subchat) - .filter((x) => x)[0]; + const subchatLog: string[] = toolCalls.flatMap((tc) => tc.subchat_log ?? []); const attachedFiles = toolCalls - .map((toolCall) => toolCall.attached_files) - .filter((x) => x) - .flat(); - const shownAttachedFiles = attachedFiles.slice(-4); - const hiddenFiles = attachedFiles.length - 4; + .flatMap((tc) => tc.attached_files ?? 
[]) + .filter((f, i, arr) => arr.indexOf(f) === i); + const shownAttachedFiles = attachedFiles.slice(-6); + const hiddenFiles = Math.max(0, attachedFiles.length - 6); // Use this for single tool result return ( @@ -245,7 +304,7 @@ export const SingleModelToolContent: React.FC<{ toolUsageAmount={toolUsageAmount} hiddenFiles={hiddenFiles} shownAttachedFiles={shownAttachedFiles} - subchat={subchat} + subchatLog={subchatLog} open={open} onClick={() => setOpen((prev) => !prev)} waiting={busy} @@ -275,9 +334,15 @@ export const SingleModelToolContent: React.FC<{ export type ToolContentProps = { toolCalls: ToolCall[]; + contextFilesByToolId?: Record<string, ChatContextFile[]>; + diffsByToolId?: Record<string, DiffChunk[]>; }; -export const ToolContent: React.FC<ToolContentProps> = ({ toolCalls }) => { +export const ToolContent: React.FC<ToolContentProps> = ({ + toolCalls, + contextFilesByToolId, + diffsByToolId, +}) => { const features = useAppSelector(selectFeatures); const ids = toolCalls.reduce<string[]>((acc, cur) => { if (cur.id !== undefined) return [...acc, cur.id]; @@ -285,7 +350,14 @@ export const ToolContent: React.FC<ToolContentProps> = ({ toolCalls }) => { }, []); const allToolResults = useAppSelector(selectManyToolResultsByIds(ids)); - return processToolCalls(toolCalls, allToolResults, features); + return processToolCalls( + toolCalls, + allToolResults, + features, + [], + contextFilesByToolId, + diffsByToolId, + ); }; function processToolCalls( @@ -293,30 +365,543 @@ function processToolCalls( toolResults: ToolResult[], features: RootState["config"]["features"] = {}, processed: React.ReactNode[] = [], + contextFilesByToolId: Record<string, ChatContextFile[]> = {}, + diffsByToolId: Record<string, DiffChunk[]> = {}, ) { if (toolCalls.length === 0) return processed; const [head, ...tail] = toolCalls; const result = toolResults.find((result) => result.tool_call_id === head.id); + const contextFiles = head.id ? contextFilesByToolId[head.id] : undefined; + const diffs = head.id ? 
diffsByToolId[head.id] : undefined; + + if (head.function.name === "cat") { + const elem = ( + <ReadTool + key={`read-tool-${processed.length}`} + toolCall={head} + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "tree") { + const elem = ( + <ListTool + key={`list-tool-${processed.length}`} + toolCall={head} + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "search_pattern") { + const elem = ( + <SearchTool + key={`search-pattern-tool-${processed.length}`} + toolCall={head} + toolType="search_pattern" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "search_semantic") { + const elem = ( + <SearchTool + key={`search-semantic-tool-${processed.length}`} + toolCall={head} + toolType="search_semantic" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "search_symbol_definition") { + const elem = ( + <SearchTool + key={`search-symbol-tool-${processed.length}`} + toolCall={head} + toolType="search_symbol_definition" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "shell") { + const elem = ( + <NewShellTool key={`shell-tool-${processed.length}`} toolCall={head} /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "shell_service") { + const elem = ( + <NewShellServiceTool + key={`shell-service-tool-${processed.length}`} + toolCall={head} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "subagent") { + const elem = ( + <NewSubagentTool + key={`subagent-tool-${processed.length}`} + toolCall={head} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } - // TODO: handle knowledge differently. 
- // memories are split in content with 🗃️019957b6ff + if (head.function.name === "strategic_planning") { + const elem = ( + <PlanningTool + key={`strategic-planning-tool-${processed.length}`} + toolCall={head} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "code_review") { + const elem = ( + <NewCodeReviewTool + key={`code-review-tool-${processed.length}`} + toolCall={head} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "deep_research") { + const elem = ( + <ResearchTool + key={`deep-research-tool-${processed.length}`} + toolCall={head} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "knowledge") { + const elem = ( + <KnowledgeTool + key={`knowledge-tool-${processed.length}`} + toolCall={head} + toolType="knowledge" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "search_trajectories") { + const elem = ( + <KnowledgeTool + key={`trajectories-tool-${processed.length}`} + toolCall={head} + toolType="search_trajectories" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } - if (result && head.function.name === "knowledge") { + if (head.function.name === "get_trajectory_context") { const elem = ( - <Knowledge key={`knowledge-tool-${processed.length}`} toolCall={head} /> + <KnowledgeTool + key={`trajectory-context-tool-${processed.length}`} + toolCall={head} + toolType="trajectories" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "create_knowledge") { + const elem = ( + <KnowledgeTool + key={`create-knowledge-tool-${processed.length}`} + toolCall={head} + toolType="create_knowledge" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "web") { + const elem = ( + <WebTool + key={`web-tool-${processed.length}`} + toolCall={head} + toolType="web" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "web_search") { + const elem = ( + <WebTool + key={`web-search-tool-${processed.length}`} + toolCall={head} + toolType="web_search" + contextFiles={contextFiles} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, ); - return processToolCalls(tail, toolResults, features, [...processed, elem]); } if (isRawTextDocToolCall(head)) { const elem = ( - <TextDocTool - key={`textdoc-tool-${head.function.name}-${processed.length}`} + <EditTool + key={`edit-tool-${head.function.name}-${processed.length}`} + toolCall={head} + diffs={diffs} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + 
[...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "mv") { + const elem = ( + <FileOpTool + key={`mv-tool-${processed.length}`} + toolCall={head} + toolType="mv" + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "rm") { + const elem = ( + <FileOpTool + key={`rm-tool-${processed.length}`} toolCall={head} - toolFailed={result?.tool_failed} + toolType="rm" + diffs={diffs} /> ); - return processToolCalls(tail, toolResults, features, [...processed, elem]); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "add_workspace_folder") { + const elem = ( + <FileOpTool + key={`add-workspace-tool-${processed.length}`} + toolCall={head} + toolType="add_workspace_folder" + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "tasks_set") { + const elem = ( + <TasksTool key={`tasks-tool-${processed.length}`} toolCall={head} /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "task_done") { + const elem = ( + <TaskDoneTool + key={`task-done-tool-${processed.length}`} + toolCall={head} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name === "ask_questions") { + const elem = ( + <AskQuestionsTool + key={`ask-questions-tool-${processed.length}`} + toolCall={head} + /> + ); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); + } + + if (head.function.name?.startsWith("openai_")) { + const name = head.function.name; + let elem: React.ReactNode; + switch (name) { + case "openai_web_search_call": + elem = ( + <OpenAIWebSearchCallTool + key={`openai-web-search-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_file_search_call": + elem = ( + <OpenAIFileSearchCallTool + key={`openai-file-search-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_code_interpreter_call": + elem = ( + <OpenAICodeInterpreterCallTool + key={`openai-code-interpreter-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_computer_call": + elem = ( + <OpenAIComputerCallTool + key={`openai-computer-call-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_computer_call_output": + elem = ( + <OpenAIComputerCallOutputTool + key={`openai-computer-output-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_image_generation_call": + elem = ( + <OpenAIImageGenerationCallTool + key={`openai-image-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_audio": + elem = ( + <OpenAIAudioTool + key={`openai-audio-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_refusal": + elem = ( + <OpenAIRefusalTool + key={`openai-refusal-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_mcp_call": + elem = ( + <OpenAIMcpCallTool + key={`openai-mcp-call-${head.id ?? 
processed.length}`} + toolCall={head} + /> + ); + break; + case "openai_mcp_list_tools": + elem = ( + <OpenAIMcpListToolsTool + key={`openai-mcp-list-tools-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + break; + default: + elem = ( + <OpenAIResponsesTool + key={`openai-responses-tool-${head.id ?? processed.length}`} + toolCall={head} + /> + ); + } + + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); } if (result && isMultiModalToolResult(result)) { @@ -341,74 +926,85 @@ function processToolCalls( toolResults={multiModalToolResults} /> ); - return processToolCalls(nextTail, toolResults, features, [ - ...processed, - elem, - ]); - } - - const restInTail = takeWhile(tail, (toolCall) => { - const item = toolResults.find( - (result) => result.tool_call_id === toolCall.id, + return processToolCalls( + nextTail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, ); - return item === undefined || !isMultiModalToolResult(item); - }); - const nextTail = tail.slice(restInTail.length); + } + // Fallback: use GenericTool for any unhandled tool const elem = ( - <SingleModelToolContent - key={`single-model-tool-call-${processed.length}`} - toolCalls={[head, ...restInTail]} + <GenericTool + key={`generic-tool-${head.id ?? processed.length}`} + toolCall={head} /> ); - return processToolCalls(nextTail, toolResults, features, [ - ...processed, - elem, - ]); + return processToolCalls( + tail, + toolResults, + features, + [...processed, elem], + contextFilesByToolId, + diffsByToolId, + ); } const MultiModalToolContent: React.FC<{ toolCalls: ToolCall[]; toolResults: MultiModalToolResult[]; }> = ({ toolCalls, toolResults }) => { - const [open, setOpen] = React.useState(false); const ref = useRef<HTMLDivElement>(null); const handleHide = useHideScroll(ref); const isStreaming = useAppSelector(selectIsStreaming); const isWaiting = useAppSelector(selectIsWaiting); + const store = useCollapsibleStore(); + const ids = useMemo(() => { - return toolCalls.reduce<string[]>((acc, cur) => { - if (typeof cur === "string") return [...acc, cur]; - return acc; - }, []); + return toolCalls + .map((tc) => tc.id) + .filter((id): id is string => typeof id === "string"); }, [toolCalls]); - const diffs = useAppSelector(selectManyDiffMessageByIds(ids)); + const idsKey = ids.join("|"); + const mmStoreKey = ids[0] ? 
`mm:${ids[0]}` : undefined;
+  const [open, setOpen] = React.useState(() => {
+    if (mmStoreKey && store) {
+      const stored = store.get(mmStoreKey);
+      if (stored !== undefined) return stored;
+    }
+    return false;
+  });
+
+  useEffect(() => {
+    if (mmStoreKey && store) store.set(mmStoreKey, open);
+  }, [mmStoreKey, store, open]);
+
+  const selectDiffs = useMemo(
+    () => selectManyDiffMessageByIds(ids),
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+    [idsKey],
+  );
+  const diffs = useAppSelector(selectDiffs);
   const handleClose = useCallback(() => {
     handleHide();
     setOpen(false);
   }, [handleHide]);
-  // const content = toolResults.map((toolResult) => toolResult.content);
   const hasImages = toolResults.some((toolResult) =>
     toolResult.content.some((content) => content.m_type.startsWith("image/")),
   );
-  // TOOD: duplicated
   const toolNames = toolCalls.reduce<string[]>((acc, toolCall) => {
-    // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
-    if (toolCall === null) {
-      // eslint-disable-next-line no-console
-      console.error("toolCall is null");
-      return acc;
-    }
     if (!toolCall.function.name) return acc;
     if (acc.includes(toolCall.function.name)) return acc;
     return [...acc, toolCall.function.name];
   }, []);
-  // TODO: duplicated
   const toolUsageAmount = toolNames.map<ToolUsage>((toolName) => {
     return {
       functionName: toolName,
@@ -520,26 +1116,44 @@ const MultiModalToolContent: React.FC<{
 type ToolUsageSummaryProps = {
   toolUsageAmount: ToolUsage[];
   hiddenFiles?: number;
-  shownAttachedFiles?: (string | undefined)[];
-  subchat?: string;
+  shownAttachedFiles?: string[];
+  subchatLog?: string[];
   open: boolean;
   onClick?: () => void;
   waiting: boolean;
 };
+function getFileIcon(path: string): string {
+  if (path.endsWith("/") || !path.includes(".")) return "📂";
+  return "📄";
+}
+
+function truncatePath(path: string, maxLen = 50): string {
+  if (path.length <= maxLen) return path;
+  const parts = path.split("/");
+  if (parts.length <= 2) return "…" + path.slice(-maxLen + 1);
+  const filename = parts[parts.length - 1];
+  const dir = parts[parts.length - 2];
+  const short = `…/${dir}/${filename}`;
+  if (short.length <= maxLen) return short;
+  return "…" + path.slice(-maxLen + 1);
+}
+
 const ToolUsageSummary = forwardRef<HTMLDivElement, ToolUsageSummaryProps>(
   (
     {
       toolUsageAmount,
       hiddenFiles,
       shownAttachedFiles,
-      subchat,
+      subchatLog,
       open,
       onClick,
       waiting,
     },
     ref,
   ) => {
+    const currentStep = (subchatLog ?? []).slice(-1)[0];
+
     return (
       <AnimatedText as="div" weight="light" size="1" animating={waiting}>
         <Flex gap="2" align="end" onClick={onClick} ref={ref} my="2">
@@ -550,7 +1164,7 @@ const ToolUsageSummary = forwardRef<HTMLDivElement, ToolUsageSummaryProps>(
             style={{ cursor: "pointer" }}
           >
             <Flex gap="2" align="center" justify="center">
-              {waiting ? <Spinner /> : "🔨"} {/* 🔨{" "} */}
+              {waiting ?
<Spinner /> : "🔨"} {toolUsageAmount.map(({ functionName, amountOfCalls }, index) => ( <span key={functionName}> <ToolUsageDisplay @@ -562,28 +1176,44 @@ const ToolUsageSummary = forwardRef<HTMLDivElement, ToolUsageSummaryProps>( ))} </Flex> - {hiddenFiles && hiddenFiles > 0 && ( + {hiddenFiles !== undefined && hiddenFiles > 0 && ( <Text weight="light" size="1" ml="4"> - {`🔎 <${hiddenFiles} files hidden>`} + {`<+${hiddenFiles} more files>`} </Text> )} - {shownAttachedFiles?.map((file, index) => { - if (!file) return null; - - return ( - <Text weight="light" size="1" key={index} ml="4"> - 🔎 {file} - </Text> - ); - })} - {subchat && ( - <Flex ml="4"> - {waiting && <Spinner />} - <Text weight="light" size="1" ml="4px"> - {subchat} - </Text> - </Flex> - )} + {shownAttachedFiles?.map((file, index) => ( + <Text weight="light" size="1" key={index} ml="4"> + {getFileIcon(file)} {truncatePath(file)} + </Text> + ))} + {currentStep && + (() => { + const parsed = parseProgressEntry(currentStep); + return ( + <Flex direction="column" gap="1" ml="4" mt="1"> + {parsed.step && ( + <Flex align="center" gap="1"> + {waiting && <Spinner size="1" />} + <Text weight="light" size="1"> + {parsed.step}: + </Text> + </Flex> + )} + <Text + size="1" + color="gray" + as="div" + ml={parsed.step ? "4" : "0"} + style={{ + whiteSpace: "pre-wrap", + wordBreak: "break-word", + }} + > + {parsed.text} + </Text> + </Flex> + ); + })()} </Flex> <Chevron open={open} /> </Flex> @@ -592,114 +1222,3 @@ const ToolUsageSummary = forwardRef<HTMLDivElement, ToolUsageSummaryProps>( }, ); ToolUsageSummary.displayName = "ToolUsageSummary"; - -// TODO: make this look nicer. -const Knowledge: React.FC<{ toolCall: ToolCall }> = ({ toolCall }) => { - const [open, setOpen] = React.useState(false); - const ref = useRef(null); - const scrollOnHide = useHideScroll(ref); - - const handleHide = useCallback(() => { - setOpen(false); - scrollOnHide(); - }, [scrollOnHide]); - - const maybeResult = useAppSelector((state) => - selectToolResultById(state, toolCall.id), - ); - - const argsString = React.useMemo(() => { - return toolCallArgsToString(toolCall.function.arguments); - }, [toolCall.function.arguments]); - - const memories = useMemo(() => { - if (typeof maybeResult?.content !== "string") return []; - return splitMemories(maybeResult.content); - }, [maybeResult?.content]); - - const functionCalled = "```python\n" + name + "(" + argsString + ")\n```"; - - return ( - <Container> - <Collapsible.Root open={open} onOpenChange={setOpen}> - <Collapsible.Trigger asChild> - <Flex - gap="2" - align="end" - onClick={() => setOpen((prev) => !prev)} - ref={ref} - > - <Flex - gap="1" - align="start" - direction="column" - style={{ cursor: "pointer" }} - > - <Text weight="light" size="1"> - 📚 Knowledge - </Text> - </Flex> - <Chevron open={open} /> - </Flex> - </Collapsible.Trigger> - <Collapsible.Content> - <Flex direction="column" pt="4"> - <ScrollArea scrollbars="horizontal" style={{ width: "100%" }}> - <Box> - <CommandMarkdown isInsideScrollArea> - {functionCalled} - </CommandMarkdown> - </Box> - </ScrollArea> - <Flex gap="4" direction="column" py="4"> - {memories.map((memory) => { - return ( - <Memory - key={memory.memid} - id={memory.memid} - content={memory.content} - /> - ); - })} - </Flex> - <FadedButton color="gray" onClick={handleHide} mx="2"> - Hide Memories - </FadedButton> - </Flex> - </Collapsible.Content> - </Collapsible.Root> - </Container> - ); -}; - -const Memory: React.FC<{ id: string; content: string }> = ({ id, content }) => { - return ( - 
<Card> - <Flex direction="column" gap="2"> - <Flex justify="between" align="center"> - <Text size="1" weight="light"> - Memory: {id} - </Text> - </Flex> - <Separator size="4" /> - <Text size="2">{content}</Text> - </Flex> - </Card> - ); -}; - -function splitMemories(text: string): { memid: string; content: string }[] { - // Split by 🗃️ and filter out empty strings - const parts = text.split("🗃️").filter((part) => part.trim()); - - return parts.map((part) => { - const newlineIndex = part.indexOf("\n"); - const memid = part.substring(0, newlineIndex); - const content = part.substring(newlineIndex + 1); - - return { - memid, - content, - }; - }); -} diff --git a/refact-agent/gui/src/components/ChatContent/UserInput.tsx b/refact-agent/gui/src/components/ChatContent/UserInput.tsx index b43420eac..75e13c773 100644 --- a/refact-agent/gui/src/components/ChatContent/UserInput.tsx +++ b/refact-agent/gui/src/components/ChatContent/UserInput.tsx @@ -1,39 +1,57 @@ -import { Pencil2Icon } from "@radix-ui/react-icons"; -import { Button, Container, Flex, IconButton, Text } from "@radix-ui/themes"; +import { Box, Container, Flex } from "@radix-ui/themes"; +import { useCopyToClipboard } from "../../hooks/useCopyToClipboard"; import React, { useCallback, useMemo, useState } from "react"; -import { selectMessages } from "../../features/Chat"; -import { CheckpointButton } from "../../features/Checkpoints"; -import { useAppSelector } from "../../hooks"; -import { - isUserMessage, - ProcessedUserMessageContentWithImages, - UserMessageContentWithImage, - type UserMessage, -} from "../../services/refact"; -import { takeWhile } from "../../utils"; +import type { UserMessage } from "../../services/refact"; +import type { Checkpoint } from "../../features/Checkpoints/types"; import { RetryForm } from "../ChatForm"; import { DialogImage } from "../DialogImage"; import { Markdown } from "../Markdown"; import styles from "./ChatContent.module.css"; import { Reveal } from "../Reveal"; +import { MessageFooter, MessageWrapper } from "./MessageFooter"; export type UserInputProps = { children: UserMessage["content"]; messageIndex: number; - // maybe add images argument ? + messageId?: string; + checkpoints?: Checkpoint[]; onRetry: (index: number, question: UserMessage["content"]) => void; - // disableRetry?: boolean; + onBranch?: (messageId: string) => void; + onDelete?: (messageId: string) => void; }; -export const UserInput: React.FC<UserInputProps> = ({ +const _UserInput: React.FC<UserInputProps> = ({ messageIndex, + messageId, + checkpoints, children, onRetry, + onBranch, + onDelete, }) => { - const messages = useAppSelector(selectMessages); + const copyToClipboard = useCopyToClipboard(); const [showTextArea, setShowTextArea] = useState(false); - const [isEditButtonVisible, setIsEditButtonVisible] = useState(false); + + const handleCopy = useCallback(() => { + const text = + typeof children === "string" + ? 
children + : children + .filter((c) => { + if ("type" in c && c.type === "text") return true; + if ("m_type" in c && c.m_type === "text") return true; + return false; + }) + .map((c) => { + if ("text" in c) return c.text; + if ("m_content" in c) return String(c.m_content); + return ""; + }) + .filter(Boolean) + .join("\n"); + copyToClipboard(text); + }, [children, copyToClipboard]); const handleSubmit = useCallback( (value: UserMessage["content"]) => { @@ -43,196 +61,145 @@ export const UserInput: React.FC<UserInputProps> = ({ [messageIndex, onRetry], ); - const handleShowTextArea = useCallback( - (value: boolean) => { - setShowTextArea(value); - if (isEditButtonVisible) { - setIsEditButtonVisible(false); - } - }, - [isEditButtonVisible], - ); + const handleEditClick = useCallback((event: React.MouseEvent) => { + // Don't enter edit mode if user clicked on interactive elements + const target = event.target as HTMLElement; + const tagName = target.tagName.toLowerCase(); + + const isInteractiveElement = + tagName === "a" || + tagName === "code" || + tagName === "pre" || + tagName === "button"; + const hasInteractiveParent = + target.closest("a") !== null || + target.closest("pre") !== null || + target.closest("button") !== null; + + if (isInteractiveElement || hasInteractiveParent) { + return; + } + + // Skip if user is selecting text + const selection = window.getSelection(); + if (selection && selection.toString().length > 0) { + return; + } + + setShowTextArea(true); + }, []); + + // Extract text content for rendering + const textContent = useMemo(() => { + if (typeof children === "string") return children; + return children + .filter((c) => { + if ("type" in c && c.type === "text") return true; + if ("m_type" in c && c.m_type === "text") return true; + return false; + }) + .map((c) => { + if ("text" in c) return c.text; + if ("m_content" in c) return String(c.m_content); + return ""; + }) + .filter(Boolean) + .join("\n"); + }, [children]); - // const lines = children.split("\n"); // won't work if it's an array - const elements = process(children); - const isString = typeof children === "string"; - const linesLength = isString ? children.split("\n").length : Infinity; + // Extract images for rendering + const images = useMemo(() => { + if (typeof children === "string") return []; + return children.filter((c) => { + if ("type" in c && c.type === "image_url") return true; + if ("m_type" in c && c.m_type.startsWith("image/")) return true; + return false; + }); + }, [children]); - const checkpointsFromMessage = useMemo(() => { - const maybeUserMessage = messages[messageIndex]; - if (!isUserMessage(maybeUserMessage)) return null; - return maybeUserMessage.checkpoints; - }, [messageIndex, messages]); + const checkpointsFromMessage = checkpoints ?? null; const isCompressed = useMemo(() => { if (typeof children !== "string") return false; return children.startsWith("🗜️ "); }, [children]); - return ( - <Container position="relative" pt="1"> - {isCompressed ? ( - <Reveal defaultOpen={false}> - <Flex direction="row" my="1" className={styles.userInput}> - {elements} - </Flex> - </Reveal> - ) : showTextArea ? ( + if (showTextArea) { + return ( + <Container pt="1"> <RetryForm onSubmit={handleSubmit} - // TODO - // value={children} value={children} - onClose={() => handleShowTextArea(false)} + onClose={() => setShowTextArea(false)} /> - ) : ( - <Flex - direction="row" - // checking for the length of the lines to determine the position of the edit button - gap={linesLength <= 2 ? 
"2" : "1"} - // TODO: what is it's a really long sentence or word with out new lines? - align={linesLength <= 2 ? "center" : "end"} - my="1" - onMouseEnter={() => setIsEditButtonVisible(true)} - onMouseLeave={() => setIsEditButtonVisible(false)} - > - <Button - // ref={ref} - variant="soft" - size="4" - className={styles.userInput} - // TODO: should this work? - // onClick={() => handleShowTextArea(true)} - asChild - > - <div>{elements}</div> - </Button> - <Flex - direction={linesLength <= 3 ? "row" : "column"} - gap="1" - style={{ - opacity: isEditButtonVisible ? 1 : 0, - visibility: isEditButtonVisible ? "visible" : "hidden", - transition: "opacity 0.15s, visibility 0.15s", - }} - > - {checkpointsFromMessage && checkpointsFromMessage.length > 0 && ( - <CheckpointButton - checkpoints={checkpointsFromMessage} - messageIndex={messageIndex} - /> - )} - <IconButton - title="Edit message" - variant="soft" - size={"2"} - onClick={() => handleShowTextArea(true)} - > - <Pencil2Icon width={15} height={15} /> - </IconButton> - </Flex> - </Flex> - )} - </Container> - ); -}; - -function process(items: UserInputProps["children"]) { - if (typeof items !== "string") { - return processUserInputArray(items); - } - return processLines(items.split("\n")); -} - -function processLines( - lines: string[], - processedLinesMemo: JSX.Element[] = [], -): JSX.Element[] { - if (lines.length === 0) return processedLinesMemo; - - const [head, ...tail] = lines; - const nextBackTicksIndex = tail.findIndex((l) => l.startsWith("```")); - const key = `line-${processedLinesMemo.length + 1}`; - - if (!head.startsWith("```") || nextBackTicksIndex === -1) { - const processedLines = processedLinesMemo.concat( - <Text - size="2" - as="div" - key={key} - wrap="balance" - className={styles.break_word} - > - {head} - </Text>, + </Container> ); - return processLines(tail, processedLines); } - const endIndex = nextBackTicksIndex + 1; - - const code = [head].concat(tail.slice(0, endIndex)).join("\n"); - const processedLines = processedLinesMemo.concat( - <Markdown key={key}>{code}</Markdown>, - ); - - const next = tail.slice(endIndex); - return processLines(next, processedLines); -} - -function isUserContentImage( - item: UserMessageContentWithImage | ProcessedUserMessageContentWithImages, -) { return ( - ("m_type" in item && item.m_type.startsWith("image/")) || - ("type" in item && item.type === "image_url") - ); -} - -function processUserInputArray( - items: ( - | UserMessageContentWithImage - | ProcessedUserMessageContentWithImages - )[], - memo: JSX.Element[] = [], -) { - if (items.length === 0) return memo; - const [head, ...tail] = items; - - if ("type" in head && head.type === "text") { - const processedLines = processLines(head.text.split("\n")); - return processUserInputArray(tail, memo.concat(processedLines)); - } - - if ("m_type" in head && head.m_type === "text") { - const processedLines = processLines(head.m_content.split("\n")); - return processUserInputArray(tail, memo.concat(processedLines)); - } - - const isImage = isUserContentImage(head); - - if (!isImage) return processUserInputArray(tail, memo); - - const imagesInTail = takeWhile(tail, isUserContentImage); - const nextTail = tail.slice(imagesInTail.length); - const images = [head, ...imagesInTail]; - const elem = ( - <Flex key={`user-image-images-${memo.length}`} gap="2" wrap="wrap" my="2"> - {images.map((image, index) => { - if ("type" in image && image.type === "image_url") { - const key = `user-input${memo.length}-${image.type}-${index}`; - const content = 
image.image_url.url;
-          return <DialogImage src={content} key={key} />;
-        }
-        if ("m_type" in image && image.m_type.startsWith("image/")) {
-          const key = `user-input${memo.length}-${image.m_type}-${index}`;
-          const content = `data:${image.m_type};base64,${image.m_content}`;
-          return <DialogImage src={content} key={key} />;
-        }
-        return null;
-      })}
-    </Flex>
+    <MessageWrapper>
+      <Container pt="1">
+        <Flex justify="end">
+          <Box className={styles.userInput} onClick={handleEditClick}>
+            {isCompressed ? (
+              <Reveal defaultOpen={false}>
+                <Markdown canHaveInteractiveElements={false}>
+                  {textContent}
+                </Markdown>
+              </Reveal>
+            ) : (
+              <>
+                {textContent && (
+                  <Markdown canHaveInteractiveElements={true}>
+                    {textContent}
+                  </Markdown>
+                )}
+                {images.length > 0 && (
+                  <Flex
+                    gap="2"
+                    wrap="wrap"
+                    mt={textContent ? "2" : "0"}
+                    onClick={(e) => e.stopPropagation()}
+                  >
+                    {images.map((image, index) => {
+                      if ("type" in image && image.type === "image_url") {
+                        return (
+                          <DialogImage
+                            key={`img-${index}`}
+                            src={image.image_url.url}
+                          />
+                        );
+                      }
+                      if (
+                        "m_type" in image &&
+                        image.m_type.startsWith("image/")
+                      ) {
+                        const content = `data:${image.m_type};base64,${image.m_content}`;
+                        return (
+                          <DialogImage key={`img-${index}`} src={content} />
+                        );
+                      }
+                      return null;
+                    })}
+                  </Flex>
+                )}
+              </>
+            )}
+          </Box>
+        </Flex>
+        <Flex justify="end">
+          <MessageFooter
+            messageId={messageId}
+            onCopy={handleCopy}
+            onBranch={onBranch}
+            onDelete={onDelete}
+            checkpoints={checkpointsFromMessage}
+            messageIndex={messageIndex}
+          />
+        </Flex>
+      </Container>
+    </MessageWrapper>
   );
+};
-  return processUserInputArray(nextTail, memo.concat(elem));
-}
+export const UserInput = React.memo(_UserInput);
diff --git a/refact-agent/gui/src/components/ChatContent/VirtualizedChatList.tsx b/refact-agent/gui/src/components/ChatContent/VirtualizedChatList.tsx
new file mode 100644
index 000000000..bbf5c3fe7
--- /dev/null
+++ b/refact-agent/gui/src/components/ChatContent/VirtualizedChatList.tsx
@@ -0,0 +1,202 @@
+/* eslint-disable react/prop-types */
+import React, { useCallback, useRef, useState, useMemo } from "react";
+import { Virtuoso, VirtuosoHandle } from "react-virtuoso";
+import { Flex, Container, Box } from "@radix-ui/themes";
+import { ScrollToBottomButton } from "../ScrollArea/ScrollToBottomButton";
+import styles from "./ChatContent.module.css";
+
+export type VirtualizedChatListProps<T extends { key: string }> = {
+  items: T[];
+  renderItem: (item: T) => React.ReactNode;
+  initialScrollIndex?: number;
+  footer?: React.ReactNode;
+  isStreaming?: boolean;
+};
+
+export function VirtualizedChatList<T extends { key: string }>({
+  items,
+  renderItem,
+  initialScrollIndex,
+  footer,
+  isStreaming = false,
+}: VirtualizedChatListProps<T>) {
+  const virtuosoRef = useRef<VirtuosoHandle>(null);
+  const [showFollowButton, setShowFollowButton] = useState(false);
+  const autoFollowRef = useRef(true);
+  const userScrolledUpRef = useRef(false);
+  const lastScrollTopRef = useRef(0);
+  // Timestamp of the last wheel/touch event that scrolled downward.
+  // Used to distinguish real user scroll-down from Virtuoso measurement
+  // adjustments that passively change scrollTop.
+  const lastActiveScrollDownTsRef = useRef(0);
+
+  const handleAtBottomChange = useCallback((bottom: boolean) => {
+    if (bottom && userScrolledUpRef.current) {
+      // Only re-arm auto-follow if the user recently performed an active
+      // scroll-down gesture (wheel or touch). Virtuoso measurement
+      // adjustments can passively shift the scroll position into the
+      // atBottomThreshold — that must NOT re-arm follow.
+      const recentActiveScroll =
+        performance.now() - lastActiveScrollDownTsRef.current < 500;
+      if (recentActiveScroll) {
+        autoFollowRef.current = true;
+        userScrolledUpRef.current = false;
+      }
+      // When NOT an active scroll we leave userScrolledUpRef = true so the
+      // follow button reappears if Virtuoso later pushes us away from bottom.
+    }
+    setShowFollowButton(!bottom && userScrolledUpRef.current);
+  }, []);
+
+  const handleFollowClick = useCallback(() => {
+    autoFollowRef.current = true;
+    userScrolledUpRef.current = false;
+    setShowFollowButton(false);
+    virtuosoRef.current?.scrollToIndex({
+      index: items.length - 1,
+      align: "end",
+      behavior: "smooth",
+    });
+  }, [items.length]);
+
+  const followOutput = useCallback(
+    (isAtBottom: boolean) => {
+      if (userScrolledUpRef.current) return false;
+      if (isAtBottom && autoFollowRef.current) {
+        return isStreaming ? "auto" : "smooth";
+      }
+      return false;
+    },
+    [isStreaming],
+  );
+
+  const computeItemKey = useCallback((_index: number, item: T) => item.key, []);
+
+  const itemContent = useCallback(
+    (_index: number, item: T) => <Container>{renderItem(item)}</Container>,
+    [renderItem],
+  );
+
+  const Scroller = useMemo(() => {
+    const ScrollerComponent = React.forwardRef<
+      HTMLDivElement,
+      React.HTMLAttributes<HTMLDivElement>
+    >(function VirtuosoScroller(props, ref) {
+      const { children, style, onWheel, onScroll, ...restProps } = props;
+      const handleWheel: React.WheelEventHandler<HTMLDivElement> = (event) => {
+        if (event.deltaY < 0) {
+          autoFollowRef.current = false;
+          userScrolledUpRef.current = true;
+          setShowFollowButton(true);
+        } else if (event.deltaY > 0) {
+          lastActiveScrollDownTsRef.current = performance.now();
+        }
+        onWheel?.(event);
+      };
+
+      const handleScroll: React.UIEventHandler<HTMLDivElement> = (event) => {
+        const nextScrollTop = event.currentTarget.scrollTop;
+        // Detect upward scroll as a safety net (keyboard, scrollbar drag,
+        // touch, etc. — onWheel already covers mouse/trackpad). Use a +1px
+        // tolerance to ignore sub-pixel Virtuoso measurement jitter.
+        if (nextScrollTop + 1 < lastScrollTopRef.current) {
+          autoFollowRef.current = false;
+          userScrolledUpRef.current = true;
+          setShowFollowButton(true);
+        }
+        // NOTE: We intentionally do NOT infer "user scrolling down" from
+        // scrollTop increases. Virtuoso's internal offset corrections during
+        // item remeasurement can increase scrollTop without any user gesture,
+        // and mistaking those for active scrolling would re-arm auto-follow
+        // and cause visible scroll jumps while reading.
+        lastScrollTopRef.current = nextScrollTop;
+        onScroll?.(event);
+      };
+
+      return (
+        <div
+          ref={ref}
+          style={{
+            ...style,
+            overflowY: "auto",
+            overflowX: "hidden",
+          }}
+          className={styles.virtuosoScroller}
+          {...restProps}
+          onWheel={handleWheel}
+          onScroll={handleScroll}
+        >
+          {children}
+        </div>
+      );
+    });
+    return ScrollerComponent;
+  }, []);
+
+  const List = useMemo(() => {
+    const ListComponent = React.forwardRef<
+      HTMLDivElement,
+      React.HTMLAttributes<HTMLDivElement>
+    >(function VirtuosoList({ children, style, ...props }, ref) {
+      return (
+        <Flex
+          ref={ref}
+          direction="column"
+          className={styles.content}
+          p="2"
+          gap="1"
+          style={style}
+          {...props}
+        >
+          {children}
+        </Flex>
+      );
+    });
+    return ListComponent;
+  }, []);
+
+  const Footer = useCallback(
+    () => (
+      <>
+        {footer}
+        <Box style={{ height: 80 }} />
+      </>
+    ),
+    [footer],
+  );
+
+  const components = useMemo(
+    () => ({ Scroller, List, Footer }),
+    [Scroller, List, Footer],
+  );
+
+  const viewportPadding = useMemo(
+    () =>
+      isStreaming ? { top: 1600, bottom: 2400 } : { top: 3200, bottom: 4400 },
+    [isStreaming],
+  );
+
+  return (
+    <Box style={{ flexGrow: 1, height: "100%", position: "relative" }}>
+      <Virtuoso
+        ref={virtuosoRef}
+        data={items}
+        computeItemKey={computeItemKey}
+        itemContent={itemContent}
+        components={components}
+        atBottomStateChange={handleAtBottomChange}
+        followOutput={followOutput}
+        initialTopMostItemIndex={
+          initialScrollIndex !== undefined
+            ? { index: initialScrollIndex, align: "end" }
+            : undefined
+        }
+        atBottomThreshold={20}
+        increaseViewportBy={viewportPadding}
+      />
+      {showFollowButton && <ScrollToBottomButton onClick={handleFollowClick} />}
+    </Box>
+  );
+}
+
+VirtualizedChatList.displayName = "VirtualizedChatList";
diff --git a/refact-agent/gui/src/components/ChatContent/useCollapsibleState.ts b/refact-agent/gui/src/components/ChatContent/useCollapsibleState.ts
new file mode 100644
index 000000000..ac1e61902
--- /dev/null
+++ b/refact-agent/gui/src/components/ChatContent/useCollapsibleState.ts
@@ -0,0 +1,37 @@
+import { useCallback, useMemo, useState } from "react";
+
+type CollapsibleState = Record<string, boolean>;
+
+export function useCollapsibleState(defaultOpen = false) {
+  const [state, setState] = useState<CollapsibleState>({});
+
+  const isOpen = useCallback(
+    (key: string) => (key in state ? state[key] : defaultOpen),
+    [state, defaultOpen],
+  );
+
+  const setOpen = useCallback((key: string, open: boolean) => {
+    setState((prev) => ({ ...prev, [key]: open }));
+  }, []);
+
+  const toggle = useCallback(
+    (key: string) => {
+      setState((prev) => {
+        const current = key in prev ? prev[key] : defaultOpen;
+        return { ...prev, [key]: !current };
+      });
+    },
+    [defaultOpen],
+  );
+
+  const reset = useCallback(() => {
+    setState({});
+  }, []);
+
+  return useMemo(
+    () => ({ isOpen, setOpen, toggle, reset }),
+    [isOpen, setOpen, toggle, reset],
+  );
+}
+
+export type CollapsibleStateManager = ReturnType<typeof useCollapsibleState>;
diff --git a/refact-agent/gui/src/components/ChatContent/useCollapsibleStoreProvider.ts b/refact-agent/gui/src/components/ChatContent/useCollapsibleStoreProvider.ts
new file mode 100644
index 000000000..a202b555c
--- /dev/null
+++ b/refact-agent/gui/src/components/ChatContent/useCollapsibleStoreProvider.ts
@@ -0,0 +1,24 @@
+import { useCallback, useMemo, useRef } from "react";
+import type { CollapsibleStore } from "./CollapsibleStore";
+
+export function useCollapsibleStoreProvider(
+  resetKey: string,
+): CollapsibleStore {
+  const storeRef = useRef(new Map<string, boolean>());
+  const prevKeyRef = useRef(resetKey);
+
+  if (prevKeyRef.current !== resetKey) {
+    storeRef.current = new Map();
+    prevKeyRef.current = resetKey;
+  }
+
+  const get = useCallback((key: string): boolean | undefined => {
+    return storeRef.current.get(key);
+  }, []);
+
+  const set = useCallback((key: string, open: boolean) => {
+    storeRef.current.set(key, open);
+  }, []);
+
+  return useMemo(() => ({ get, set }), [get, set]);
+}
diff --git a/refact-agent/gui/src/components/ChatContent/useStoredOpen.ts b/refact-agent/gui/src/components/ChatContent/useStoredOpen.ts
new file mode 100644
index 000000000..420343d09
--- /dev/null
+++ b/refact-agent/gui/src/components/ChatContent/useStoredOpen.ts
@@ -0,0 +1,39 @@
+import {
+  useCallback,
+  useContext,
+  useEffect,
+  useState,
+  createContext,
+} from "react";
+import type { CollapsibleStore } from "./CollapsibleStore";
+
+const CollapsibleStoreContext = createContext<CollapsibleStore | null>(null);
+
+export const CollapsibleStoreProvider = CollapsibleStoreContext.Provider;
+
+export function useCollapsibleStore(): CollapsibleStore | null {
+  return useContext(CollapsibleStoreContext);
+}
+
+export function useStoredOpen(
+  storeKey: string | undefined,
+  defaultOpen = false,
+): [boolean, () => void, (open: boolean) => void] {
+  const store = useCollapsibleStore();
+  const [isOpen, setIsOpen] = useState(() => {
+    if (storeKey && store) {
+      const stored = store.get(storeKey);
+      if (stored !== undefined) return stored;
+    }
+    return defaultOpen;
+  });
+
+  useEffect(() => {
+    if (storeKey && store) store.set(storeKey, isOpen);
+  }, [storeKey, store, isOpen]);
+
+  const toggle = useCallback(() => setIsOpen((prev) => !prev), []);
+  const setOpen = useCallback((open: boolean) => setIsOpen(open), []);
+
+  return [isOpen, toggle, setOpen];
+}
diff --git a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/AgentCapabilities.tsx b/refact-agent/gui/src/components/ChatForm/AgentCapabilities/AgentCapabilities.tsx
index 23e391a7e..7e9cac5fc 100644
--- a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/AgentCapabilities.tsx
+++ b/refact-agent/gui/src/components/ChatForm/AgentCapabilities/AgentCapabilities.tsx
@@ -1,136 +1,32 @@
-import {
-  MixerVerticalIcon,
-  QuestionMarkCircledIcon,
-} from "@radix-ui/react-icons";
-import {
-  Flex,
-  HoverCard,
-  IconButton,
-  Popover,
-  Separator,
-  Text,
-} from "@radix-ui/themes";
-import {
-  AgentRollbackSwitch,
-  ApplyPatchSwitch,
-  FollowUpsSwitch,
-  TitleGenerationSwitch,
-  UseCompressionSwitch,
-  ProjectInfoSwitch,
-} from "../ChatControls";
-import { useAppSelector } from
"../../../hooks"; -import { - selectAreFollowUpsEnabled, - selectAutomaticPatch, - selectCheckpointsEnabled, - selectIsTitleGenerationEnabled, - selectUseCompression, - selectIncludeProjectInfo, - selectMessages, -} from "../../../features/Chat"; -import { Fragment, useMemo } from "react"; -import { ToolGroups } from "./ToolGroups"; +import { Flex } from "@radix-ui/themes"; +import { UsageCounter } from "../../UsageCounter"; +import { useUsageCounter } from "../../UsageCounter/useUsageCounter"; +import { TrajectoryButton } from "../../Trajectory"; -export const AgentCapabilities = () => { - const isPatchAutomatic = useAppSelector(selectAutomaticPatch); - const isAgentRollbackEnabled = useAppSelector(selectCheckpointsEnabled); - const areFollowUpsEnabled = useAppSelector(selectAreFollowUpsEnabled); - const isTitleGenerationEnabled = useAppSelector( - selectIsTitleGenerationEnabled, - ); - const useCompression = useAppSelector(selectUseCompression); - const includeProjectInfo = useAppSelector(selectIncludeProjectInfo); - const messages = useAppSelector(selectMessages); - const isNewChat = messages.length === 0; +export type AgentCapabilitiesProps = { + trajectoryOpen?: boolean; + onTrajectoryOpenChange?: (open: boolean) => void; +}; - const agenticFeatures = useMemo(() => { - return [ - { - name: "Auto-patch", - enabled: isPatchAutomatic, - switcher: <ApplyPatchSwitch />, - }, - { - name: "Files rollback", - enabled: isAgentRollbackEnabled, - switcher: <AgentRollbackSwitch />, - }, - { - name: "Follow-Ups", - enabled: areFollowUpsEnabled, - switcher: <FollowUpsSwitch />, - }, - { - name: "Chat Titles", - enabled: isTitleGenerationEnabled, - switcher: <TitleGenerationSwitch />, - }, - { - name: "Compression", - enabled: useCompression, - switcher: <UseCompressionSwitch />, - }, - { - name: "Project info", - enabled: includeProjectInfo ?? 
true, - switcher: <ProjectInfoSwitch />, - hide: !isNewChat, - }, - ]; - }, [ - isPatchAutomatic, - isAgentRollbackEnabled, - areFollowUpsEnabled, - isTitleGenerationEnabled, - useCompression, - includeProjectInfo, - isNewChat, - ]); +export const AgentCapabilities = ({ + trajectoryOpen, + onTrajectoryOpenChange, +}: AgentCapabilitiesProps) => { + const { shouldShow: shouldShowUsage } = useUsageCounter(); - const enabledAgenticFeatures = useMemo( - () => - agenticFeatures - .filter( - (feature) => feature.enabled && !("hide" in feature && feature.hide), - ) - .map((feature) => feature.name) - .join(", ") || "None", - [agenticFeatures], - ); + if (!shouldShowUsage) { + return null; + } return ( - <Flex mb="2" gap="2" align="center"> - <Popover.Root> - <Popover.Trigger> - <IconButton variant="soft" size="1"> - <MixerVerticalIcon /> - </IconButton> - </Popover.Trigger> - <Popover.Content side="top" alignOffset={-10} sideOffset={20}> - <Flex gap="2" direction="column"> - {agenticFeatures.map((feature) => { - if ("hide" in feature && feature.hide) return null; - return <Fragment key={feature.name}>{feature.switcher}</Fragment>; - })} - <Separator size="4" mt="2" mb="1" /> - <ToolGroups /> - </Flex> - </Popover.Content> - </Popover.Root> - <Text size="2"> - Enabled Features: - <Text color="gray"> {enabledAgenticFeatures}</Text> - </Text> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content size="2" maxWidth="280px"> - <Text as="p" size="2"> - Here you can control special features affecting Agent behaviour - </Text> - </HoverCard.Content> - </HoverCard.Root> + <Flex mb="2" gap="2" align="center" justify="end"> + <Flex align="center" gap="1"> + <UsageCounter /> + <TrajectoryButton + forceOpen={trajectoryOpen} + onOpenChange={onTrajectoryOpenChange} + /> + </Flex> </Flex> ); }; diff --git a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroup.module.css b/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroup.module.css deleted file mode 100644 index b784cbb2a..000000000 --- a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroup.module.css +++ /dev/null @@ -1,18 +0,0 @@ -.toolGroup { - box-sizing: border-box; - border-radius: 4px; - transition: background-color 0.15s ease-in-out; - cursor: pointer; - &:hover { - background-color: var(--gray-a2); - } -} - -.categoryBadge { - display: inline-flex; - align-items: center; - justify-content: center; - width: 20px; - height: 20px; - padding: 0; -} diff --git a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroup.tsx b/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroup.tsx deleted file mode 100644 index 6fe7ecdca..000000000 --- a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroup.tsx +++ /dev/null @@ -1,90 +0,0 @@ -import { - ChevronRightIcon, - QuestionMarkCircledIcon, -} from "@radix-ui/react-icons"; -import { - Badge, - BadgeProps, - Box, - Flex, - Heading, - HoverCard, - Text, - Tooltip, -} from "@radix-ui/themes"; -import React, { useMemo } from "react"; -import { ToolGroup as ToolGroupType } from "../../../services/refact"; - -import styles from "./ToolGroup.module.css"; - -export type ToolGroupProps = { - group: ToolGroupType; - setSelectedToolGroup: (group: ToolGroupType) => void; -}; - -export const ToolGroup: React.FC<ToolGroupProps> = ({ - group, - setSelectedToolGroup, -}) => { - const categoryBadge = useMemo(() => { - const categoryMap: Record< - string, - { 
color: BadgeProps["color"]; tooltip: string } - > = { - builtin: { color: "red", tooltip: "Built-In Tools" }, - integration: { color: "yellow", tooltip: "Integrations Tools" }, - mcp: { color: "green", tooltip: "MCP Tools" }, - }; - - const { color, tooltip } = categoryMap[group.category]; - const label = group.category.charAt(0).toUpperCase(); - - return { label, color, tooltip }; - }, [group.category]); - - return ( - <Box - key={group.name} - onClick={() => setSelectedToolGroup(group)} - py="1" - pl="2" - pr="1" - className={styles.toolGroup} - > - <Heading as="h4" size="1"> - <Flex align="center" justify="between"> - <Flex as="span" align="center" gap="1"> - <Flex align="center" gap="2"> - <Tooltip content={categoryBadge.tooltip}> - <Badge - size="1" - className={styles.categoryBadge} - color={categoryBadge.color} - > - {categoryBadge.label} - </Badge> - </Tooltip> - {group.name} - </Flex> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content side="top" align="center" size="1"> - <Text as="p" size="1"> - {group.description} - </Text> - </HoverCard.Content> - </HoverCard.Root> - </Flex> - <Flex align="center" gap="1"> - <Tooltip content="Indicates how many tools the group contains"> - <Badge color="indigo">{group.tools.length}</Badge> - </Tooltip> - <ChevronRightIcon /> - </Flex> - </Flex> - </Heading> - </Box> - ); -}; diff --git a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroupList.tsx b/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroupList.tsx deleted file mode 100644 index 89bfe6b4e..000000000 --- a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroupList.tsx +++ /dev/null @@ -1,46 +0,0 @@ -import React from "react"; -import { motion } from "framer-motion"; -import { Flex } from "@radix-ui/themes"; - -import { ToolGroup } from "./ToolGroup"; -import { ScrollArea } from "../../ScrollArea"; - -import { ToolGroup as ToolGroupType } from "../../../services/refact"; - -export type ToolGroupListProps = { - groups: ToolGroupType[]; - onSelect: (group: ToolGroupType | null) => void; -}; - -export const ToolGroupList: React.FC<ToolGroupListProps> = ({ - groups, - onSelect, -}) => { - return ( - <motion.div - key="group-list" - initial={{ opacity: 0, x: -40 }} - animate={{ opacity: 1, x: 0 }} - exit={{ opacity: 0, x: -40 }} - transition={{ duration: 0.25 }} - > - <ScrollArea - scrollbars="vertical" - type="auto" - style={{ - maxHeight: "125px", - }} - > - <Flex direction="column" gap="1" pr={groups.length < 4 ? 
"0" : "3"}> - {groups.map((toolGroup) => ( - <ToolGroup - key={toolGroup.name} - group={toolGroup} - setSelectedToolGroup={onSelect} - /> - ))} - </Flex> - </ScrollArea> - </motion.div> - ); -}; diff --git a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroups.tsx b/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroups.tsx deleted file mode 100644 index db815c105..000000000 --- a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolGroups.tsx +++ /dev/null @@ -1,75 +0,0 @@ -import { Flex, Heading, Skeleton } from "@radix-ui/themes"; -import { AnimatePresence } from "framer-motion"; -import React from "react"; - -import { useGetToolGroupsQuery } from "../../../hooks"; - -import { useToolGroups } from "./useToolGroups"; -import { ToolsList } from "./ToolsList"; -import { ToolGroupList } from "./ToolGroupList"; - -export const ToolGroups: React.FC = () => { - const { data: toolGroups, isLoading, isSuccess } = useGetToolGroupsQuery(); - const { - toggleAllTools, - toggleTool, - resetSelection, - selectToolGroup, - selectedToolGroup, - selectedToolGroupTools, - someToolsEnabled, - } = useToolGroups(); - - if (isLoading || !isSuccess) return <ToolGroupsSkeleton />; - - return ( - <Flex direction="column" gap="3" style={{ overflow: "hidden" }}> - <Heading size="3" as="h3"> - Manage Tool Groups - </Heading> - <AnimatePresence mode="wait" initial={false}> - {!selectedToolGroup ? ( - <ToolGroupList - key="group-list" - groups={toolGroups} - onSelect={selectToolGroup} - /> - ) : ( - <> - {selectedToolGroupTools && ( - <ToolsList - key="tools-list" - group={selectedToolGroup} - tools={selectedToolGroupTools} - onToggle={toggleTool} - onToggleAll={toggleAllTools} - onBack={resetSelection} - someEnabled={someToolsEnabled} - /> - )} - </> - )} - </AnimatePresence> - </Flex> - ); -}; - -const ToolGroupsSkeleton: React.FC = () => { - return ( - <Flex direction="column" gap="3" style={{ overflow: "hidden" }}> - <Heading size="3" as="h3"> - Manage Tool Groups - </Heading> - <Flex direction="column" gap="1"> - {[1, 2].map((idx) => ( - <Flex key={idx} align="center" justify="between" gap="1"> - <Skeleton width="30px" height="25px" /> - <Skeleton width="100%" height="25px" /> - <Skeleton width="30px" height="25px" /> - <Skeleton width="30px" height="25px" /> - </Flex> - ))} - </Flex> - </Flex> - ); -}; diff --git a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolsList.tsx b/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolsList.tsx deleted file mode 100644 index 50f44859a..000000000 --- a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/ToolsList.tsx +++ /dev/null @@ -1,125 +0,0 @@ -import React from "react"; -import { motion } from "framer-motion"; -import { - Button, - Flex, - Heading, - HoverCard, - Switch, - Text, -} from "@radix-ui/themes"; -import { - ChevronLeftIcon, - QuestionMarkCircledIcon, -} from "@radix-ui/react-icons"; - -import { ScrollArea } from "../../ScrollArea"; - -import { ToolGroup } from "../../../services/refact"; - -export type ToolsListProps = { - group: ToolGroup; - tools: ToolGroup["tools"]; - onBack: () => void; - onToggleAll: (group: ToolGroup) => void; - onToggle: ({ - tool, - parentGroup, - togglingTo, - }: { - tool: ToolGroup["tools"][number]; - parentGroup: ToolGroup; - togglingTo: boolean; - }) => void; - someEnabled: boolean; -}; - -export const ToolsList: React.FC<ToolsListProps> = ({ - group, - tools, - onToggle, - onBack, - onToggleAll, - someEnabled, -}) => { - return ( - <motion.div - 
key="tools-list" - initial={{ opacity: 0, x: 40 }} - animate={{ opacity: 1, x: 0 }} - exit={{ opacity: 0, x: 40 }} - transition={{ duration: 0.25 }} - > - <Flex direction="column" gap="3"> - <Flex align="center" gap="3"> - <Button variant="outline" size="1" onClick={onBack} aria-label="Back"> - <ChevronLeftIcon /> - Back - </Button> - <Heading as="h4" size="2"> - {group.name} - </Heading> - </Flex> - <Button - onClick={() => onToggleAll(group)} - size="1" - variant="outline" - color="gray" - mb="2" - > - {someEnabled ? "Unselect" : "Select"} all - </Button> - <ScrollArea - scrollbars="vertical" - type="auto" - style={{ maxHeight: "125px" }} - > - <Flex direction="column" gap="3" pr="4"> - {tools.map((tool) => ( - <Flex - key={tool.spec.name} - align="center" - gap="4" - justify="between" - > - <Flex align="center" gap="2"> - <Text as="p" size="2"> - 🔨 {tool.spec.display_name} - </Text> - {tool.spec.description.trim() !== "" && ( - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content - side="top" - align="center" - size="1" - avoidCollisions - > - <Text as="p" size="1"> - {tool.spec.description} - </Text> - </HoverCard.Content> - </HoverCard.Root> - )} - </Flex> - <Switch - size="1" - checked={tool.enabled} - onCheckedChange={(newState) => - onToggle({ - tool, - parentGroup: group, - togglingTo: newState, - }) - } - /> - </Flex> - ))} - </Flex> - </ScrollArea> - </Flex> - </motion.div> - ); -}; diff --git a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/useToolGroups.ts b/refact-agent/gui/src/components/ChatForm/AgentCapabilities/useToolGroups.ts deleted file mode 100644 index 8a7cff505..000000000 --- a/refact-agent/gui/src/components/ChatForm/AgentCapabilities/useToolGroups.ts +++ /dev/null @@ -1,152 +0,0 @@ -import { useCallback, useEffect, useMemo, useState } from "react"; -import { useAppDispatch } from "../../../hooks"; -import { useUpdateToolGroupsMutation } from "../../../hooks/useUpdateToolGroupsMutation"; -import { - Tool, - ToolGroup, - ToolGroupUpdate, - toolsApi, - ToolSpec, -} from "../../../services/refact"; - -export function useToolGroups() { - const dispatch = useAppDispatch(); - const { mutationTrigger: updateToolGroups } = useUpdateToolGroupsMutation(); - - const [selectedToolGroup, setSelectedToolGroup] = useState<ToolGroup | null>( - null, - ); - const [selectedToolGroupTools, setSelectedToolGroupTools] = useState< - Tool[] | null - >(null); - - const selectToolGroup = useCallback( - (group: ToolGroup | null) => { - setSelectedToolGroup(group); - }, - [setSelectedToolGroup], - ); - - const someToolsEnabled = useMemo(() => { - if (!selectedToolGroupTools) return false; - return selectedToolGroupTools.some((tool) => tool.enabled); - }, [selectedToolGroupTools]); - - const handleUpdateToolGroups = useCallback( - ({ - updatedTools, - updatedGroup, - }: { - updatedTools: { enabled: boolean; spec: ToolSpec }[]; - updatedGroup: ToolGroup; - }) => { - const dataToSend: ToolGroupUpdate[] = updatedTools.map((tool) => ({ - enabled: tool.enabled, - source: tool.spec.source, - name: tool.spec.name, - })); - - updateToolGroups(dataToSend) - .then((result) => { - if (result.data) { - // TODO: reduce complexity - // it means, individual tool update - if (selectedToolGroupTools && updatedTools.length === 1) { - setSelectedToolGroupTools((prev) => { - const tool = updatedTools[0]; - return prev - ? 
prev.map((t) => { - if (t.spec.name === tool.spec.name) { - return { ...t, enabled: tool.enabled }; - } - return t; - }) - : selectedToolGroupTools; - }); - return; - } - setSelectedToolGroup(updatedGroup); - } - }) - .catch((error: unknown) => { - // eslint-disable-next-line no-console - console.log(error); - }); - }, - [updateToolGroups, setSelectedToolGroupTools, selectedToolGroupTools], - ); - - const toggleAllTools = useCallback( - (toolGroup: ToolGroup) => { - const updatedTools = toolGroup.tools.map((tool) => ({ - ...tool, - enabled: someToolsEnabled ? false : true, - })); - - const updatedGroup = { ...toolGroup, tools: updatedTools }; - - handleUpdateToolGroups({ - updatedTools, - updatedGroup, - }); - }, - [handleUpdateToolGroups, someToolsEnabled], - ); - - const toggleTool = useCallback( - ({ - tool, - parentGroup, - togglingTo, - }: { - tool: ToolGroup["tools"][number]; - parentGroup: ToolGroup; - togglingTo: boolean; - }) => { - const updatedTools: Tool[] = [ - { - enabled: togglingTo, - spec: tool.spec, - }, - ]; - - const updatedGroup = { - ...parentGroup, - tools: parentGroup.tools.map((t) => { - if (t.spec.name === tool.spec.name) { - return { ...tool }; - } - - return { ...t }; - }), - }; - - handleUpdateToolGroups({ - updatedTools, - updatedGroup, - }); - }, - [handleUpdateToolGroups], - ); - - const resetSelection = useCallback(() => { - dispatch(toolsApi.util.invalidateTags(["TOOL_GROUPS"])); - setSelectedToolGroup(null); - }, [dispatch]); - - useEffect(() => { - if (selectedToolGroup) { - setSelectedToolGroupTools(selectedToolGroup.tools); - } - }, [selectedToolGroup]); - - return { - toggleTool, - toggleAllTools, - resetSelection, - selectToolGroup, - selectedToolGroup, - selectedToolGroupTools, - someToolsEnabled, - }; -} diff --git a/refact-agent/gui/src/components/ChatForm/AttachmentTile.module.css b/refact-agent/gui/src/components/ChatForm/AttachmentTile.module.css new file mode 100644 index 000000000..83bc96b17 --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/AttachmentTile.module.css @@ -0,0 +1,185 @@ +.tile { + position: relative; + width: 56px; + height: 56px; + border-radius: var(--radius-2); + overflow: hidden; + flex-shrink: 0; + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + cursor: pointer; + transition: + background 0.15s, + box-shadow 0.15s; +} + +.tile:focus-visible { + outline: 2px solid var(--accent-8); + outline-offset: 1px; +} + +.imageThumbnail { + width: 100%; + height: 100%; + object-fit: cover; + cursor: zoom-in; +} + +.fileTile { + background: var(--gray-a3); + padding: var(--space-1); + gap: 2px; +} + +.fileTile:hover { + background: var(--gray-a4); +} + +.fileTile[data-color="blue"] { + background: var(--blue-a3); +} +.fileTile[data-color="blue"]:hover { + background: var(--blue-a4); +} + +.fileTile[data-color="orange"] { + background: var(--orange-a3); +} +.fileTile[data-color="orange"]:hover { + background: var(--orange-a4); +} + +.fileTile[data-color="yellow"] { + background: var(--yellow-a3); +} +.fileTile[data-color="yellow"]:hover { + background: var(--yellow-a4); +} + +.fileTile[data-color="purple"] { + background: var(--purple-a3); +} +.fileTile[data-color="purple"]:hover { + background: var(--purple-a4); +} + +.fileTile[data-color="pink"] { + background: var(--pink-a3); +} +.fileTile[data-color="pink"]:hover { + background: var(--pink-a4); +} + +.fileTile[data-color="red"] { + background: var(--red-a3); +} +.fileTile[data-color="red"]:hover { + background: var(--red-a4); +} 
+
+.fileTile[data-color="cyan"] {
+  background: var(--cyan-a3);
+}
+.fileTile[data-color="cyan"]:hover {
+  background: var(--cyan-a4);
+}
+
+.fileTile[data-color="green"] {
+  background: var(--green-a3);
+}
+.fileTile[data-color="green"]:hover {
+  background: var(--green-a4);
+}
+
+.plainTextTile {
+  background: var(--gray-a3);
+  padding: var(--space-1);
+  gap: 2px;
+}
+
+.plainTextTile:hover {
+  background: var(--gray-a4);
+}
+
+.extensionBadge {
+  font-size: 10px;
+  font-weight: 600;
+  text-transform: uppercase;
+  color: var(--gray-12);
+  line-height: 1;
+}
+
+.extensionBadge[data-color="blue"] {
+  color: var(--blue-11);
+}
+.extensionBadge[data-color="orange"] {
+  color: var(--orange-11);
+}
+.extensionBadge[data-color="yellow"] {
+  color: var(--yellow-11);
+}
+.extensionBadge[data-color="purple"] {
+  color: var(--purple-11);
+}
+.extensionBadge[data-color="pink"] {
+  color: var(--pink-11);
+}
+.extensionBadge[data-color="red"] {
+  color: var(--red-11);
+}
+.extensionBadge[data-color="cyan"] {
+  color: var(--cyan-11);
+}
+.extensionBadge[data-color="green"] {
+  color: var(--green-11);
+}
+
+.filename {
+  font-size: 9px;
+  color: var(--gray-11);
+  text-align: center;
+  overflow: hidden;
+  text-overflow: ellipsis;
+  white-space: nowrap;
+  max-width: 100%;
+  line-height: 1.2;
+}
+
+.subtitle {
+  font-size: 8px;
+  color: var(--gray-10);
+  line-height: 1;
+}
+
+.removeButton {
+  position: absolute;
+  top: 2px;
+  right: 2px;
+  width: 14px;
+  height: 14px;
+  min-width: 14px;
+  min-height: 14px;
+  padding: 0;
+  opacity: 0;
+  transition: opacity 0.15s ease;
+}
+
+.copyButton {
+  position: absolute;
+  bottom: 2px;
+  right: 2px;
+  width: 14px;
+  height: 14px;
+  min-width: 14px;
+  min-height: 14px;
+  padding: 0;
+  opacity: 0;
+  transition: opacity 0.15s ease;
+}
+
+.tile:hover .removeButton,
+.tile:hover .copyButton {
+  opacity: 1;
+}
diff --git a/refact-agent/gui/src/components/ChatForm/AttachmentTile.tsx b/refact-agent/gui/src/components/ChatForm/AttachmentTile.tsx
new file mode 100644
index 000000000..fafb4a212
--- /dev/null
+++ b/refact-agent/gui/src/components/ChatForm/AttachmentTile.tsx
@@ -0,0 +1,385 @@
+import React, { useCallback, useState, useEffect } from "react";
+import { Box, Text, IconButton, Dialog, Tooltip } from "@radix-ui/themes";
+import { Cross1Icon, CopyIcon, CheckIcon } from "@radix-ui/react-icons";
+import styles from "./AttachmentTile.module.css";
+
+const isMac =
+  typeof navigator !== "undefined" &&
+  /Mac|iPod|iPhone|iPad/.test(navigator.platform);
+const copyShortcut = isMac ?
"⌘C" : "Ctrl+C"; + +async function copyToClipboard(text: string): Promise<boolean> { + // Try modern clipboard API first + try { + await navigator.clipboard.writeText(text); + return true; + } catch { + // Fall through to fallback + } + + // Fallback for iframes and older browsers + try { + const textArea = document.createElement("textarea"); + textArea.value = text; + textArea.style.position = "fixed"; + textArea.style.left = "-9999px"; + textArea.style.top = "-9999px"; + document.body.appendChild(textArea); + textArea.focus(); + textArea.select(); + const success = document.execCommand("copy"); + document.body.removeChild(textArea); + return success; + } catch { + return false; + } +} + +type ExtensionColorKey = + | "blue" + | "orange" + | "yellow" + | "purple" + | "pink" + | "red" + | "cyan" + | "green" + | "gray"; + +const EXTENSION_COLORS: Record<string, ExtensionColorKey> = { + py: "blue", + rs: "orange", + js: "yellow", + ts: "blue", + tsx: "blue", + jsx: "yellow", + java: "orange", + kt: "purple", + cpp: "pink", + c: "gray", + h: "gray", + go: "cyan", + rb: "red", + php: "purple", + json: "gray", + yaml: "red", + yml: "red", + toml: "orange", + xml: "blue", + html: "orange", + css: "purple", + scss: "pink", + md: "blue", + txt: "gray", + env: "green", + sh: "green", + bash: "green", + zsh: "green", +}; + +function getExtensionColor(ext: string): ExtensionColorKey { + const color = EXTENSION_COLORS[ext.toLowerCase()] as + | ExtensionColorKey + | undefined; + return color ?? "gray"; +} + +function getExtension(filename: string): string { + if (filename.startsWith(".")) { + return filename.slice(1).toUpperCase(); + } + const parts = filename.split("."); + if (parts.length > 1) { + return parts[parts.length - 1].toUpperCase(); + } + return "FILE"; +} + +function truncateFilename(filename: string, maxLength = 12): string { + const basename = filename.split(/[/\\]/).pop() ?? filename; + if (basename.length <= maxLength) return basename; + + const ext = basename.lastIndexOf("."); + if (ext > 0) { + const name = basename.substring(0, ext); + const extension = basename.substring(ext); + const availableLength = maxLength - extension.length - 2; + if (availableLength > 0) { + return name.substring(0, availableLength) + ".." 
+ extension; + } + } + return basename.substring(0, maxLength - 2) + ".."; +} + +export type AttachmentTileProps = + | { + kind: "image"; + id: string; + name: string; + src: string; + onRemove?: () => void; + } + | { + kind: "file"; + id: string; + name: string; + copyText: string; + subtitle?: string; + onRemove?: () => void; + onOpen?: () => void | Promise<void>; + } + | { + kind: "plain-text"; + id: string; + label: string; + preview: string; + copyText: string; + }; + +const ImageTile: React.FC<{ + src: string; + name: string; + onRemove?: () => void; +}> = ({ src, name, onRemove }) => { + return ( + <Box className={styles.tile}> + <Dialog.Root> + <Dialog.Trigger> + <img + src={src} + alt={name} + className={styles.imageThumbnail} + title={name} + /> + </Dialog.Trigger> + <Dialog.Content maxWidth="800px"> + <img + style={{ objectFit: "contain", width: "100%" }} + src={src} + alt={name} + /> + </Dialog.Content> + </Dialog.Root> + {onRemove && ( + <IconButton + type="button" + size="1" + variant="solid" + color="gray" + className={styles.removeButton} + onClick={(e) => { + e.stopPropagation(); + onRemove(); + }} + > + <Cross1Icon width={10} height={10} /> + </IconButton> + )} + </Box> + ); +}; + +const FileTile: React.FC<{ + name: string; + copyText: string; + subtitle?: string; + onRemove?: () => void; + onOpen?: () => void | Promise<void>; +}> = ({ name, copyText, subtitle, onRemove, onOpen }) => { + const ext = getExtension(name); + const colorKey = getExtensionColor(ext.toLowerCase()); + const displayName = truncateFilename(name); + const [copied, setCopied] = useState(false); + + useEffect(() => { + if (copied) { + const timer = setTimeout(() => setCopied(false), 1500); + return () => clearTimeout(timer); + } + }, [copied]); + + const handleCopy = useCallback( + async (e: React.MouseEvent | React.KeyboardEvent) => { + e.preventDefault(); + e.stopPropagation(); + const success = await copyToClipboard(copyText); + if (success) { + setCopied(true); + } + }, + [copyText], + ); + + const handleKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if ((e.ctrlKey || e.metaKey) && e.key === "c") { + void handleCopy(e); + } + if (e.key === "Enter" && onOpen) { + e.preventDefault(); + void onOpen(); + } + }, + [handleCopy, onOpen], + ); + + const handleClick = useCallback(() => { + if (onOpen) { + void onOpen(); + } + }, [onOpen]); + + return ( + <Tooltip content={`${copyShortcut} to copy path`}> + <Box + className={`${styles.tile} ${styles.fileTile}`} + data-color={colorKey} + tabIndex={0} + onKeyDown={handleKeyDown} + onClick={handleClick} + title={`${name}${subtitle ? ` ${subtitle}` : ""}`} + role="button" + aria-label={`File: ${name}${subtitle ? ` ${subtitle}` : ""}`} + > + <Text className={styles.extensionBadge} data-color={colorKey}> + .{ext} + </Text> + <Text className={styles.filename}>{displayName}</Text> + {subtitle && <Text className={styles.subtitle}>{subtitle}</Text>} + <IconButton + type="button" + size="1" + variant="ghost" + color={copied ? "green" : "gray"} + className={styles.copyButton} + onClick={(e) => void handleCopy(e)} + aria-label={copied ? "Copied!" : "Copy path"} + > + {copied ? 
( + <CheckIcon width={10} height={10} /> + ) : ( + <CopyIcon width={10} height={10} /> + )} + </IconButton> + {onRemove && ( + <IconButton + type="button" + size="1" + variant="solid" + color="gray" + className={styles.removeButton} + onClick={(e) => { + e.stopPropagation(); + onRemove(); + }} + aria-label="Remove" + > + <Cross1Icon width={10} height={10} /> + </IconButton> + )} + </Box> + </Tooltip> + ); +}; + +const PlainTextTile: React.FC<{ + label: string; + preview: string; + copyText: string; +}> = ({ label, preview, copyText }) => { + const [copied, setCopied] = useState(false); + + useEffect(() => { + if (copied) { + const timer = setTimeout(() => setCopied(false), 1500); + return () => clearTimeout(timer); + } + }, [copied]); + + const handleCopy = useCallback( + async (e: React.MouseEvent | React.KeyboardEvent) => { + e.preventDefault(); + e.stopPropagation(); + const success = await copyToClipboard(copyText); + if (success) { + setCopied(true); + } + }, + [copyText], + ); + + const handleKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if ((e.ctrlKey || e.metaKey) && e.key === "c") { + void handleCopy(e); + } + }, + [handleCopy], + ); + + return ( + <Tooltip content={copied ? "Copied!" : `${copyShortcut} to copy`}> + <Box + className={`${styles.tile} ${styles.plainTextTile}`} + tabIndex={0} + onKeyDown={handleKeyDown} + title={ + preview.length > 100 ? `${preview.substring(0, 100)}...` : preview + } + role="button" + aria-label="Plain text content" + > + <Text className={styles.extensionBadge} data-color="gray"> + TXT + </Text> + <Text className={styles.filename}>{label}</Text> + <IconButton + type="button" + size="1" + variant="ghost" + color={copied ? "green" : "gray"} + className={styles.copyButton} + onClick={(e) => void handleCopy(e)} + aria-label={copied ? "Copied!" : "Copy content"} + > + {copied ? 
( + <CheckIcon width={10} height={10} /> + ) : ( + <CopyIcon width={10} height={10} /> + )} + </IconButton> + </Box> + </Tooltip> + ); +}; + +export const AttachmentTile: React.FC<AttachmentTileProps> = (props) => { + switch (props.kind) { + case "image": + return ( + <ImageTile + src={props.src} + name={props.name} + onRemove={props.onRemove} + /> + ); + case "file": + return ( + <FileTile + name={props.name} + copyText={props.copyText} + subtitle={props.subtitle} + onRemove={props.onRemove} + onOpen={props.onOpen} + /> + ); + case "plain-text": + return ( + <PlainTextTile + label={props.label} + preview={props.preview} + copyText={props.copyText} + /> + ); + } +}; diff --git a/refact-agent/gui/src/components/ChatForm/ChatControls.tsx b/refact-agent/gui/src/components/ChatForm/ChatControls.tsx index aa3678144..bf35b6763 100644 --- a/refact-agent/gui/src/components/ChatForm/ChatControls.tsx +++ b/refact-agent/gui/src/components/ChatForm/ChatControls.tsx @@ -1,447 +1,13 @@ import React, { useCallback, useMemo } from "react"; -import { - Text, - Flex, - HoverCard, - Link, - Skeleton, - Box, - Switch, - Badge, - Button, -} from "@radix-ui/themes"; +import { Text, Flex, Skeleton, Box } from "@radix-ui/themes"; import { Select, type SelectProps } from "../Select"; -import { type Config } from "../../features/Config/configSlice"; -import { TruncateLeft } from "../Text"; -import styles from "./ChatForm.module.css"; -import classNames from "classnames"; -import { PromptSelect } from "./PromptSelect"; -import { Checkbox } from "../Checkbox"; -import { - ExclamationTriangleIcon, - LockClosedIcon, - LockOpen1Icon, - QuestionMarkCircledIcon, -} from "@radix-ui/react-icons"; -import { useTourRefs } from "../../features/Tour"; -import { ToolUseSwitch } from "./ToolUseSwitch"; -import { - ToolUse, - selectAreFollowUpsEnabled, - selectAutomaticPatch, - selectChatId, - selectCheckpointsEnabled, - selectIsStreaming, - selectIsTitleGenerationEnabled, - selectIsWaiting, - selectMessages, - selectToolUse, - selectUseCompression, - selectIncludeProjectInfo, - setAreFollowUpsEnabled, - setIsTitleGenerationEnabled, - setAutomaticPatch, - setEnabledCheckpoints, - setToolUse, - setUseCompression, - setIncludeProjectInfo, -} from "../../features/Chat/Thread"; -import { useAppSelector, useAppDispatch, useCapsForToolUse } from "../../hooks"; -import { useAttachedFiles } from "./useCheckBoxes"; +import { useCapsForToolUse } from "../../hooks"; +import { useAppDispatch } from "../../hooks"; import { push } from "../../features/Pages/pagesSlice"; import { RichModelSelectItem } from "../Select/RichModelSelectItem"; import { enrichAndGroupModels } from "../../utils/enrichModels"; -export const ApplyPatchSwitch: React.FC = () => { - const dispatch = useAppDispatch(); - const chatId = useAppSelector(selectChatId); - const isPatchAutomatic = useAppSelector(selectAutomaticPatch); - - const handleAutomaticPatchChange = (checked: boolean) => { - dispatch(setAutomaticPatch({ chatId, value: checked })); - }; - - return ( - <Flex - gap="4" - align="center" - wrap="wrap" - flexGrow="1" - flexShrink="0" - width="100%" - justify="between" - > - <Text size="2" mr="auto"> - Patch files without confirmation - </Text> - <Flex gap="2" align="center"> - <Switch - size="1" - title="Enable/disable automatic patch calls by Agent" - checked={isPatchAutomatic} - onCheckedChange={handleAutomaticPatchChange} - /> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - 
<HoverCard.Content side="top" align="end" size="1" maxWidth="280px"> - <Text weight="bold" size="2"> - Enabled - </Text> - <Text as="p" size="1"> - When enabled, Refact Agent will automatically apply changes to - files without asking for your confirmation. - </Text> - <Text as="div" mt="2" size="2" weight="bold"> - Disabled - </Text> - <Text as="p" size="1"> - When disabled, Refact Agent will ask for your confirmation before - applying any unsaved changes. - </Text> - </HoverCard.Content> - </HoverCard.Root> - </Flex> - </Flex> - ); -}; -export const AgentRollbackSwitch: React.FC = () => { - const dispatch = useAppDispatch(); - const isAgentRollbackEnabled = useAppSelector(selectCheckpointsEnabled); - - const handleAgentRollbackChange = (checked: boolean) => { - dispatch(setEnabledCheckpoints(checked)); - }; - - return ( - <Flex - gap="4" - align="center" - wrap="wrap" - flexGrow="1" - flexShrink="0" - width="100%" - justify="between" - > - <Text size="2" mr="auto"> - Changes rollback - </Text> - <Flex gap="2" align="center"> - <Switch - size="1" - title="Enable/disable changed rollback made by Agent" - checked={isAgentRollbackEnabled} - onCheckedChange={handleAgentRollbackChange} - /> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content side="top" align="end" size="1" maxWidth="280px"> - <Flex direction="column" gap="2"> - <Text as="p" size="1"> - When enabled, Refact Agent will automatically make snapshots of - files between your messages - </Text> - <Text as="p" size="1"> - You can rollback file changes to checkpoints taken when you sent - messages to Agent - </Text> - <Badge - color="yellow" - asChild - style={{ - whiteSpace: "pre-wrap", - }} - > - <Flex gap="2" py="1" px="2" align="center"> - <ExclamationTriangleIcon - width={16} - height={16} - style={{ flexGrow: 1, flexShrink: 0 }} - /> - <Text as="p" size="1"> - Warning: may slow down performance of Agent in large - projects - </Text> - </Flex> - </Badge> - </Flex> - </HoverCard.Content> - </HoverCard.Root> - </Flex> - </Flex> - ); -}; -export const FollowUpsSwitch: React.FC = () => { - const dispatch = useAppDispatch(); - const areFollowUpsEnabled = useAppSelector(selectAreFollowUpsEnabled); - - const handleFollowUpsEnabledChange = (checked: boolean) => { - dispatch(setAreFollowUpsEnabled(checked)); - }; - - return ( - <Flex - gap="4" - align="center" - wrap="wrap" - flexGrow="1" - flexShrink="0" - width="100%" - justify="between" - > - <Text size="2" mr="auto"> - Follow-Ups messages - </Text> - <Flex gap="2" align="center"> - <Switch - size="1" - title="Enable/disable follow-ups messages generation by Agent" - checked={areFollowUpsEnabled} - onCheckedChange={handleFollowUpsEnabledChange} - /> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content side="top" align="end" size="1" maxWidth="280px"> - <Flex direction="column" gap="2"> - <Text as="p" size="1"> - When enabled, Refact Agent will automatically generate related - follow-ups to the conversation - </Text> - <Badge - color="yellow" - asChild - style={{ - whiteSpace: "pre-wrap", - }} - > - <Flex gap="2" p="2" align="center"> - <ExclamationTriangleIcon - width={16} - height={16} - style={{ flexGrow: 1, flexShrink: 0 }} - /> - <Text as="p" size="1"> - Warning: may increase coins spending - </Text> - </Flex> - </Badge> - </Flex> - </HoverCard.Content> - </HoverCard.Root> - </Flex> - </Flex> - ); -}; - 
-export const TitleGenerationSwitch: React.FC = () => { - const dispatch = useAppDispatch(); - const isTitleGenerationEnabled = useAppSelector( - selectIsTitleGenerationEnabled, - ); - - const handleTitleGenerationEnabledChange = (checked: boolean) => { - dispatch(setIsTitleGenerationEnabled(checked)); - }; - - return ( - <Flex - gap="4" - align="center" - wrap="wrap" - flexGrow="1" - flexShrink="0" - width="100%" - justify="between" - > - <Text size="2" mr="auto"> - Chat Titles - </Text> - <Flex gap="2" align="center"> - <Switch - size="1" - title="Enable/disable chat titles generation by Agent" - checked={isTitleGenerationEnabled} - onCheckedChange={handleTitleGenerationEnabledChange} - /> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content side="top" align="end" size="1" maxWidth="280px"> - <Flex direction="column" gap="2"> - <Text as="p" size="1"> - When enabled, Refact Agent will automatically generate - summarized chat title for the conversation - </Text> - <Badge - color="yellow" - asChild - style={{ - whiteSpace: "pre-wrap", - }} - > - <Flex gap="2" p="2" align="center"> - <ExclamationTriangleIcon - width={16} - height={16} - style={{ flexGrow: 1, flexShrink: 0 }} - /> - <Text as="p" size="1"> - Warning: may increase coins spending - </Text> - </Flex> - </Badge> - </Flex> - </HoverCard.Content> - </HoverCard.Root> - </Flex> - </Flex> - ); -}; - -export const UseCompressionSwitch: React.FC = () => { - const dispatch = useAppDispatch(); - const useCompression = useAppSelector(selectUseCompression); - - const handleUseCompressionChange = (checked: boolean) => { - dispatch(setUseCompression(checked)); - }; - - return ( - <Flex - gap="4" - align="center" - wrap="wrap" - flexGrow="1" - flexShrink="0" - width="100%" - justify="between" - > - <Text size="2" mr="auto"> - Use compression - </Text> - <Flex gap="2" align="center"> - <Switch - size="1" - title="Enable/disable context compression" - checked={useCompression ?? 
false} - onCheckedChange={handleUseCompressionChange} - /> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content side="top" align="end" size="1" maxWidth="280px"> - <Flex direction="column" gap="2"> - <Text as="p" size="1"> - When enabled, Refact Agent will compress the context to reduce - token usage for long conversations - </Text> - <Badge - color="yellow" - asChild - style={{ - whiteSpace: "pre-wrap", - }} - > - <Flex gap="2" p="2" align="center"> - <ExclamationTriangleIcon - width={16} - height={16} - style={{ flexGrow: 1, flexShrink: 0 }} - /> - <Text as="p" size="1"> - Warning: may increase coins spending because it breaks the - cache - </Text> - </Flex> - </Badge> - </Flex> - </HoverCard.Content> - </HoverCard.Root> - </Flex> - </Flex> - ); -}; - -export const ProjectInfoSwitch: React.FC = () => { - const dispatch = useAppDispatch(); - const chatId = useAppSelector(selectChatId); - const messages = useAppSelector(selectMessages); - const includeProjectInfo = useAppSelector(selectIncludeProjectInfo); - - const handleIncludeProjectInfoChange = (checked: boolean) => { - dispatch(setIncludeProjectInfo({ chatId, value: checked })); - }; - - const isNewChat = messages.length === 0; - - return ( - <Flex - gap="4" - align="center" - wrap="wrap" - flexGrow="1" - flexShrink="0" - width="100%" - justify="between" - > - <Text size="2" mr="auto"> - Include project info - </Text> - <Flex gap="2" align="center"> - <Switch - size="1" - title="Include project context information" - checked={includeProjectInfo ?? true} - onCheckedChange={handleIncludeProjectInfoChange} - disabled={!isNewChat} - /> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content side="top" align="end" size="1" maxWidth="280px"> - <Flex direction="column" gap="2"> - <Text as="p" size="1"> - When enabled, extra project context information will be included - at the start of the chat to help the AI understand your codebase - better - </Text> - <Badge - color="yellow" - asChild - style={{ - whiteSpace: "pre-wrap", - }} - > - <Flex gap="2" p="2" align="center"> - <ExclamationTriangleIcon - width={16} - height={16} - style={{ flexGrow: 1, flexShrink: 0 }} - /> - <Text as="p" size="1"> - Note: This can consume a significant amount of tokens - initially. Only available when starting a new chat. - </Text> - </Flex> - </Badge> - </Flex> - </HoverCard.Content> - </HoverCard.Root> - </Flex> - </Flex> - ); -}; - export const CapsSelect: React.FC<{ disabled?: boolean }> = ({ disabled }) => { - const refs = useTourRefs(); const caps = useCapsForToolUse(); const dispatch = useAppDispatch(); @@ -518,15 +84,7 @@ export const CapsSelect: React.FC<{ disabled?: boolean }> = ({ disabled }) => { }); return ( - <Flex - gap="2" - align="center" - wrap="wrap" - // flexGrow="1" - // flexShrink="0" - // width="100%" - ref={(x) => refs.setUseModel(x)} - > + <Flex gap="2" align="center" wrap="wrap"> <Skeleton loading={caps.loading}> <Box> {allDisabled ? 
( @@ -547,183 +105,3 @@ export const CapsSelect: React.FC<{ disabled?: boolean }> = ({ disabled }) => { </Flex> ); }; - -type CheckboxHelp = { - text: string; - link?: string; - linkText?: string; -}; - -export type Checkbox = { - name: string; - label: string; - checked: boolean; - value?: string; - disabled: boolean; - fileName?: string; - hide?: boolean; - info?: CheckboxHelp; - locked?: boolean; -}; - -export type ChatControlsProps = { - checkboxes: Record<string, Checkbox>; - onCheckedChange: ( - name: keyof ChatControlsProps["checkboxes"], - checked: boolean | string, - ) => void; - - host: Config["host"]; - attachedFiles: ReturnType<typeof useAttachedFiles>; -}; - -const ChatControlCheckBox: React.FC<{ - name: string; - checked: boolean; - disabled?: boolean; - onCheckChange: (value: boolean | string) => void; - label: string; - fileName?: string; - infoText?: string; - href?: string; - linkText?: string; - locked?: boolean; -}> = ({ - name, - checked, - disabled, - onCheckChange, - label, - fileName, - infoText, - href, - linkText, - locked, -}) => { - return ( - <Flex justify="between"> - <Checkbox - size="1" - name={name} - checked={checked} - disabled={disabled} - onCheckedChange={onCheckChange} - > - {label} - {fileName && ( - // TODO: negative margin ? - <Flex ml="-3px"> - <TruncateLeft>{fileName}</TruncateLeft> - </Flex> - )} - {locked && <LockClosedIcon opacity="0.6" />} - {locked === false && <LockOpen1Icon opacity="0.6" />} - </Checkbox> - {infoText && ( - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content maxWidth="240px" size="1"> - <Flex direction="column" gap="4"> - <Text as="div" size="1"> - {infoText} - </Text> - - {href && linkText && ( - <Text size="1"> - Read more on our{" "} - <Link size="1" href={href}> - {linkText} - </Link> - </Text> - )} - </Flex> - </HoverCard.Content> - </HoverCard.Root> - )} - </Flex> - ); -}; - -export const ChatControls: React.FC<ChatControlsProps> = ({ - checkboxes, - onCheckedChange, - host, - attachedFiles, -}) => { - const refs = useTourRefs(); - const dispatch = useAppDispatch(); - const isStreaming = useAppSelector(selectIsStreaming); - const isWaiting = useAppSelector(selectIsWaiting); - const messages = useAppSelector(selectMessages); - const toolUse = useAppSelector(selectToolUse); - const onSetToolUse = useCallback( - (value: ToolUse) => dispatch(setToolUse(value)), - [dispatch], - ); - - const showControls = useMemo( - () => messages.length === 0 && !isStreaming && !isWaiting, - [isStreaming, isWaiting, messages], - ); - - return ( - <Flex - pt="2" - pb="2" - gap="2" - direction="column" - align="start" - className={classNames(styles.controls)} - > - {Object.entries(checkboxes).map(([key, checkbox]) => { - if (host === "web" && checkbox.name === "file_upload") { - return null; - } - if (checkbox.hide === true) { - return null; - } - return ( - <ChatControlCheckBox - key={key} - name={checkbox.name} - label={checkbox.label} - checked={checkbox.checked} - disabled={checkbox.disabled} - onCheckChange={(value) => onCheckedChange(key, value)} - infoText={checkbox.info?.text} - href={checkbox.info?.link} - linkText={checkbox.info?.linkText} - fileName={checkbox.fileName} - locked={checkbox.locked} - /> - ); - })} - - {host !== "web" && ( - <Button - title="Attach current file" - onClick={attachedFiles.addFile} - disabled={!attachedFiles.activeFile.name || attachedFiles.attached} - size="1" - radius="medium" - > - Attach: 
{attachedFiles.activeFile.name} - </Button> - )} - - {showControls && ( - <Flex gap="2" direction="column"> - <ToolUseSwitch - ref={(x) => refs.setUseTools(x)} - toolUse={toolUse} - setToolUse={onSetToolUse} - /> - {/* <CapsSelect /> */} - <PromptSelect /> - </Flex> - )} - </Flex> - ); -}; diff --git a/refact-agent/gui/src/components/ChatForm/ChatForm.module.css b/refact-agent/gui/src/components/ChatForm/ChatForm.module.css index e96b095b0..2cbc5d942 100644 --- a/refact-agent/gui/src/components/ChatForm/ChatForm.module.css +++ b/refact-agent/gui/src/components/ChatForm/ChatForm.module.css @@ -1,38 +1,90 @@ .button { - color: #000; + color: var(--gray-12); } .chatForm { - box-shadow: inset 0 0 0 1px var(--gray-a7); - border: 1px solid var(--gray-a7); + border: 1px solid var(--gray-a4); border-radius: 6px; flex-shrink: 0; overflow: hidden; - --interactive-group-height: 17px; - min-height: calc(100% + var(--interactive-group-height)); background-color: var(--color-surface); } -.chatForm__form :global(.rt-TextAreaRoot) { - background-color: unset; +/* Main chat input - transparent/subtle appearance */ +.chatFormMain { + background-color: transparent; + border: none; + box-shadow: none; } -.file { - display: block; - margin: 0; - /* overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; */ - word-wrap: break-word; - overflow-wrap: break-word; - word-break: break-word; - white-space: normal; - max-height: 16px; +/* Remove the blue focus border - keep border consistent */ +.chatFormMain:focus-within { + border-color: var(--gray-a3); + box-shadow: none; +} + +/* Remove Radix's internal focus ring/chrome that creates thick border effect */ +.chatFormMain :global(.rt-TextAreaRoot), +.chatFormMain :global(.rt-TextAreaRoot:focus), +.chatFormMain :global(.rt-TextAreaRoot:focus-within), +.chatFormMain :global(.rt-TextAreaRoot:focus-visible) { + box-shadow: none !important; + outline: none !important; } -.file_name { +.chatFormMain :global(.rt-TextAreaInput), +.chatFormMain :global(.rt-TextAreaInput:focus), +.chatFormMain :global(.rt-TextAreaInput:focus-visible) { + box-shadow: none !important; + outline: none !important; +} + +/* Remove any ScrollArea focus rings */ +.chatFormMain :global(.rt-ScrollAreaViewportFocusRing) { + display: none !important; +} + +/* Make TextArea transparent - override Radix variant-surface background */ +.chatForm :global(.rt-TextAreaRoot), +.chatForm :global(.rt-TextAreaRoot.rt-variant-surface), +.chatForm__form :global(.rt-TextAreaRoot), +.chatForm__form :global(.rt-TextAreaRoot.rt-variant-surface), +.textareaWrapper :global(.rt-TextAreaRoot), +.textareaWrapper :global(.rt-TextAreaRoot.rt-variant-surface) { + background-color: transparent !important; + background: transparent !important; + --text-area-background-color: transparent !important; + --color-surface: transparent !important; + box-shadow: none !important; + border: none !important; +} + +/* Thinner border for edit mode (RetryForm) - matches user message bubble style */ +.chatFormCompact { + border: 1px solid var(--gray-a3); + border-radius: var(--radius-3); +} + +.textareaWrapper { + position: relative; + --chat-input-padding: var(--space-3); +} + +.inputHeader { + padding: var(--space-2) var(--chat-input-padding); + padding-bottom: var(--space-2); display: flex; - line-height: 0; + flex-direction: column; + gap: var(--space-3); +} + +.inputHeader > *:empty { + display: none; +} + +.textareaWrapper :global(.rt-TextAreaInput) { + min-height: 100px; + padding: var(--chat-input-padding); } 
 .file_name_ellipsis_rtl {
diff --git a/refact-agent/gui/src/components/ChatForm/ChatForm.test.tsx b/refact-agent/gui/src/components/ChatForm/ChatForm.test.tsx
index 698979e8c..5b9069419 100644
--- a/refact-agent/gui/src/components/ChatForm/ChatForm.test.tsx
+++ b/refact-agent/gui/src/components/ChatForm/ChatForm.test.tsx
@@ -13,6 +13,8 @@ import {
   noCompletions,
   goodPing,
   goodUser,
+  emptyTrajectories,
+  trajectorySave,
 } from "../../utils/mockServer";
 
 const handlers = [
@@ -23,6 +25,8 @@ const handlers = [
   noCommandPreview,
   noCompletions,
   goodPing,
+  emptyTrajectories,
+  trajectorySave,
 ];
 
 server.use(...handlers);
@@ -30,7 +34,6 @@
 const App: React.FC<Partial<ChatFormProps>> = ({ ...props }) => {
   const defaultProps: ChatFormProps = {
     onSubmit: (_str: string) => ({}),
-    unCalledTools: false,
     ...props,
   };
 
@@ -58,7 +61,7 @@ describe("ChatForm", () => {
     expect(fakeOnSubmit).toHaveBeenCalled();
   });
 
-  test("when I hole shift and push enter it should not call onSubmit", async () => {
+  test("when I hold shift and push enter it should not call onSubmit", async () => {
     const fakeOnSubmit = vi.fn();
 
     const { user, ...app } = render(<App onSubmit={fakeOnSubmit} />);
@@ -107,7 +110,7 @@ describe("ChatForm", () => {
   test.each([
     "{Shift>}{enter>}{/enter}{/Shift}", // hold shift, hold enter, release enter, release shift,
-    "{Shift>}{enter>}{/Shift}{/enter}", // hold shift, hold enter, release enter, release shift,
+    "{Shift>}{enter>}{/Shift}{/enter}", // hold shift, hold enter, release shift, release enter,
  ])("when pressing %s, it should not submit", async (a) => {
     const fakeOnSubmit = vi.fn();
 
diff --git a/refact-agent/gui/src/components/ChatForm/ChatForm.tsx b/refact-agent/gui/src/components/ChatForm/ChatForm.tsx
index 5b7b02f0a..514dcbcf3 100644
--- a/refact-agent/gui/src/components/ChatForm/ChatForm.tsx
+++ b/refact-agent/gui/src/components/ChatForm/ChatForm.tsx
@@ -1,30 +1,78 @@
 import React, { useCallback, useEffect, useMemo } from "react";
-import { Flex, Card, Text, IconButton } from "@radix-ui/themes";
+import { Flex, Box, Text } from "@radix-ui/themes";
 import styles from "./ChatForm.module.css";
+const TEXT_FILE_EXTENSIONS = new Set([
+  ".txt",
+  ".md",
+  ".json",
+  ".yaml",
+  ".yml",
+  ".toml",
+  ".xml",
+  ".csv",
+  ".js",
+  ".ts",
+  ".tsx",
+  ".jsx",
+  ".py",
+  ".rs",
+  ".go",
+  ".java",
+  ".kt",
+  ".c",
+  ".cpp",
+  ".h",
+  ".hpp",
+  ".cs",
+  ".rb",
+  ".php",
+  ".swift",
+  ".sh",
+  ".bash",
+  ".zsh",
+  ".html",
+  ".css",
+  ".scss",
+  ".sass",
+  ".less",
+  ".sql",
+  ".graphql",
+  ".env",
+  ".gitignore",
+  ".dockerignore",
+]);
+
+function isTextFile(filename: string): boolean {
+  const ext = filename.slice(filename.lastIndexOf(".")).toLowerCase();
+  return TEXT_FILE_EXTENSIONS.has(ext);
+}
+
 import {
   BackToSideBarButton,
   AgentIntegrationsButton,
-  ThinkingButton,
-  ContextCapButton,
-  SendButtonWithDropdown,
+  UnifiedSendButton,
 } from "../Buttons";
-import { TextArea } from "../TextArea";
+import { StreamingTokenCounter, UsageCounter } from "../UsageCounter";
+import { TrajectoryButton } from "../Trajectory";
+import { TextAreaWithChips } from "../TextAreaWithChips";
+import { selectHost } from "../../features/Config/configSlice";
+import { useEventsBusForIDE } from "../../hooks";
 import { Form } from "./Form";
 import {
   useOnPressedEnter,
   useIsOnline,
   useConfig,
   useCapsForToolUse,
-  useSendChatRequest,
-  useCompressChat,
   useAutoFocusOnce,
+  useChatActions,
 } from "../../hooks";
 import { ErrorCallout, Callout } from "../Callout";
 import { ComboBox } from 
"../ComboBox"; -import { FilesPreview } from "./FilesPreview"; -import { CapsSelect, ChatControls } from "./ChatControls"; +import { UnifiedAttachmentsTray } from "./UnifiedAttachmentsTray"; +import { ChatSettingsDropdown } from "./ChatSettingsDropdown"; +import { ModeSelect } from "./ModeSelect"; import { addCheckboxValuesToInput } from "./utils"; import { useCommandCompletionAndPreviewFiles } from "./useCommandCompletionAndPreviewFiles"; import { useAppSelector, useAppDispatch } from "../../hooks"; @@ -33,7 +81,6 @@ import { getErrorMessage, getErrorType, } from "../../features/Errors/errorsSlice"; -import { useTourRefs } from "../../features/Tour"; import { useAttachedFiles, useCheckboxes } from "./useCheckBoxes"; import { useInputValue } from "./useInputValue"; import { @@ -47,25 +94,30 @@ import { InformationCallout, } from "../Callout/Callout"; import { ToolConfirmation } from "./ToolConfirmation"; -import { getPauseReasonsWithPauseStatus } from "../../features/ToolConfirmation/confirmationSlice"; -import { AttachImagesButton, FileList } from "../Dropzone"; +import { selectThreadConfirmation } from "../../features/Chat"; +import { AttachImagesButton } from "../Dropzone"; +import { MicrophoneButton, MicrophoneButtonRef } from "./MicrophoneButton"; import { useAttachedImages } from "../../hooks/useAttachedImages"; import { + clearChatError, selectChatError, + selectCurrentThreadId, selectIsStreaming, selectIsWaiting, - selectLastSentCompression, selectMessages, - selectQueuedMessages, - selectThreadToolUse, - selectToolUse, + selectQueuedItems, + selectThreadImages, + selectThreadMode, + setThreadMode, + DEFAULT_MODE, } from "../../features/Chat"; import { telemetryApi } from "../../services/refact"; import { push } from "../../features/Pages/pagesSlice"; -import { AgentCapabilities } from "./AgentCapabilities/AgentCapabilities"; -import { TokensPreview } from "./TokensPreview"; + +import { useUsageCounter } from "../UsageCounter/useUsageCounter"; +import { ChatInputTopControls } from "./ChatInputTopControls"; + import classNames from "classnames"; -import { ArchiveIcon } from "@radix-ui/react-icons"; export type SendPolicy = "immediate" | "after_flow"; @@ -73,52 +125,63 @@ export type ChatFormProps = { onSubmit: (str: string, sendPolicy?: SendPolicy) => void; onClose?: () => void; className?: string; - unCalledTools: boolean; }; export const ChatForm: React.FC<ChatFormProps> = ({ onSubmit, onClose, className, - unCalledTools, }) => { const dispatch = useAppDispatch(); const isStreaming = useAppSelector(selectIsStreaming); const isWaiting = useAppSelector(selectIsWaiting); - const { isMultimodalitySupportedForCurrentModel } = useCapsForToolUse(); + const caps = useCapsForToolUse(); + const { isMultimodalitySupportedForCurrentModel } = caps; const config = useConfig(); - const toolUse = useAppSelector(selectToolUse); + const host = useAppSelector(selectHost); + const { queryPathThenOpenFile } = useEventsBusForIDE(); const globalError = useAppSelector(getErrorMessage); const globalErrorType = useAppSelector(getErrorType); const chatError = useAppSelector(selectChatError); + const chatId = useAppSelector(selectCurrentThreadId); const information = useAppSelector(getInformationMessage); - const pauseReasonsWithPause = useAppSelector(getPauseReasonsWithPauseStatus); + const pauseReasonsWithPause = useAppSelector(selectThreadConfirmation); const [helpInfo, setHelpInfo] = React.useState<React.ReactNode | null>(null); + const [isVoiceActive, setIsVoiceActive] = React.useState(false); + const 
[liveTranscript, setLiveTranscript] = React.useState(""); + const [inputResetKey, setInputResetKey] = React.useState(0); const isOnline = useIsOnline(); - const { retry } = useSendChatRequest(); - - const threadToolUse = useAppSelector(selectThreadToolUse); + const { isContextFull } = useUsageCounter(); const messages = useAppSelector(selectMessages); - const lastSentCompression = useAppSelector(selectLastSentCompression); - const queuedMessages = useAppSelector(selectQueuedMessages); - const { compressChat, compressChatRequest, isCompressing } = - useCompressChat(); + const queuedItems = useAppSelector(selectQueuedItems); + const threadMode = useAppSelector(selectThreadMode); const autoFocus = useAutoFocusOnce(); + const { abort, regenerate } = useChatActions(); + + const onSetMode = useCallback( + ( + modeId: string, + threadDefaults?: Parameters<typeof setThreadMode>[0]["threadDefaults"], + ) => { + if (chatId) { + dispatch(setThreadMode({ chatId, mode: modeId, threadDefaults })); + } + }, + [dispatch, chatId], + ); + + const isModeDisabled = useMemo(() => isStreaming, [isStreaming]); const attachedFiles = useAttachedFiles(); const shouldShowBalanceLow = useAppSelector(showBalanceLowCallout); - - const shouldAgentCapabilitiesBeShown = useMemo(() => { - return threadToolUse === "agent"; - }, [threadToolUse]); + const attachedImages = useAppSelector(selectThreadImages); + const microphoneRef = React.useRef<MicrophoneButtonRef>(null); const onClearError = useCallback(() => { - if (messages.length > 0 && chatError) { - retry(messages); - } dispatch(clearError()); - }, [dispatch, retry, messages, chatError]); - - const caps = useCapsForToolUse(); + if (chatId) { + dispatch(clearChatError({ id: chatId })); + } + }, [dispatch, chatId]); const allDisabled = caps.usableModelsForPlan.every((option) => { if (typeof option === "string") return false; @@ -126,39 +189,68 @@ export const ChatForm: React.FC<ChatFormProps> = ({ }); const disableSend = useMemo(() => { - // TODO: if interrupting chat some errors can occur if (allDisabled) return true; - // if ( - // currentThreadMaximumContextTokens && - // currentThreadUsage?.prompt_tokens && - // currentThreadUsage.prompt_tokens > currentThreadMaximumContextTokens - // ) - // return false; - // if (arePromptTokensBiggerThanContext) return true; if (messages.length === 0) return false; + if (isContextFull) return true; return isWaiting || isStreaming || !isOnline; - }, [allDisabled, messages.length, isWaiting, isStreaming, isOnline]); + }, [ + allDisabled, + messages.length, + isWaiting, + isStreaming, + isOnline, + isContextFull, + ]); - const isModelSelectVisible = useMemo(() => messages.length < 1, [messages]); + const disableMicrophone = useMemo(() => { + if (allDisabled) return true; + if (isContextFull) return true; + if (!isOnline) return true; + return false; + }, [allDisabled, isContextFull, isOnline]); - const { processAndInsertImages } = useAttachedImages(); + const { + processAndInsertImages, + processAndInsertTextFiles, + textFiles, + resetAllTextFiles, + } = useAttachedImages(); const handlePastingFile = useCallback( (event: React.ClipboardEvent<HTMLTextAreaElement>) => { - if (!isMultimodalitySupportedForCurrentModel) return; - const files: File[] = []; + const imageFiles: File[] = []; + const textFilesList: File[] = []; const items = event.clipboardData.items; + for (const item of items) { if (item.kind === "file") { const file = item.getAsFile(); - file && files.push(file); + if (file) { + if (file.type === "image/jpeg" || file.type === 
"image/png") { + if (isMultimodalitySupportedForCurrentModel) { + imageFiles.push(file); + } + } else if (file.type.startsWith("text/") || isTextFile(file.name)) { + textFilesList.push(file); + } + } } } - if (files.length > 0) { + + if (imageFiles.length > 0 || textFilesList.length > 0) { event.preventDefault(); - processAndInsertImages(files); + if (imageFiles.length > 0) { + processAndInsertImages(imageFiles); + } + if (textFilesList.length > 0) { + processAndInsertTextFiles(textFilesList); + } } }, - [processAndInsertImages, isMultimodalitySupportedForCurrentModel], + [ + processAndInsertImages, + processAndInsertTextFiles, + isMultimodalitySupportedForCurrentModel, + ], ); const { @@ -174,6 +266,9 @@ export const ChatForm: React.FC<ChatFormProps> = ({ const [value, setValue, isSendImmediately, setIsSendImmediately] = useInputValue(() => unCheckAll()); + const valueRef = React.useRef(value); + valueRef.current = value; + const onClearInformation = useCallback( () => dispatch(clearInformation()), [dispatch], @@ -185,35 +280,45 @@ export const ChatForm: React.FC<ChatFormProps> = ({ attachedFiles.addFilesToInput, ); - const refs = useTourRefs(); - const handleSubmit = useCallback( (sendPolicy: SendPolicy = "after_flow") => { const trimmedValue = value.trim(); - // Both options queue during streaming, so both should be allowed - const canSubmit = trimmedValue.length > 0 && isOnline && !allDisabled; + const hasImages = attachedImages.length > 0; + const hasTextFiles = textFiles.length > 0; + const canSubmit = + (trimmedValue.length > 0 || hasImages || hasTextFiles) && + isOnline && + !allDisabled; if (canSubmit) { const valueWithFiles = attachedFiles.addFilesToInput(trimmedValue); + const valueWithTextFiles = textFiles.reduce((acc, file) => { + const ext = file.name.split(".").pop() ?? 
""; + return `\`\`\`${ext} ${file.name}\n${file.content}\n\`\`\`\n\n${acc}`; + }, valueWithFiles); const valueIncludingChecks = addCheckboxValuesToInput( - valueWithFiles, + valueWithTextFiles, checkboxes, ); - // TODO: add @files setLineSelectionInteracted(false); onSubmit(valueIncludingChecks, sendPolicy); - setValue(() => ""); + setValue(""); + setInputResetKey((k) => k + 1); unCheckAll(); attachedFiles.removeAll(); + resetAllTextFiles(); } }, [ value, allDisabled, isOnline, + attachedImages, + textFiles, attachedFiles, checkboxes, setLineSelectionInteracted, + resetAllTextFiles, onSubmit, setValue, unCheckAll, @@ -266,7 +371,7 @@ export const ChatForm: React.FC<ChatFormProps> = ({ } if (trimmedCommand === "@help") { - handleHelpInfo(helpText()); // This line has been fixed + handleHelpInfo(helpText()); } else { handleHelpInfo(null); } @@ -296,6 +401,34 @@ export const ChatForm: React.FC<ChatFormProps> = ({ setIsSendImmediately, ]); + const handleLiveTranscript = useCallback((text: string) => { + setLiveTranscript(text); + }, []); + + const handleRecordingChange = useCallback( + (isRecording: boolean, isFinishing: boolean) => { + setIsVoiceActive(isRecording || isFinishing); + if (!isRecording && !isFinishing) { + setLiveTranscript(""); + } + }, + [], + ); + + useEffect(() => { + const handleKeyDown = (event: KeyboardEvent) => { + if (event.ctrlKey && event.shiftKey && event.code === "Space") { + event.preventDefault(); + if (!disableMicrophone && microphoneRef.current) { + void microphoneRef.current.toggleRecording(); + } + } + }; + + window.addEventListener("keydown", handleKeyDown); + return () => window.removeEventListener("keydown", handleKeyDown); + }, [disableMicrophone]); + if (globalError) { return ( <Flex direction="column" mt="2" gap="2"> @@ -324,14 +457,14 @@ export const ChatForm: React.FC<ChatFormProps> = ({ ); } - if (!isStreaming && pauseReasonsWithPause.pause) { + if (pauseReasonsWithPause.pause) { return ( - <ToolConfirmation pauseReasons={pauseReasonsWithPause.pauseReasons} /> + <ToolConfirmation pauseReasons={pauseReasonsWithPause.pause_reasons} /> ); } return ( - <Card mt="1" style={{ flexShrink: 0, position: "relative" }}> + <Box style={{ flexShrink: 0, position: "relative" }}> {globalErrorType === "balance" && ( <BallanceCallOut mt="0" @@ -348,9 +481,7 @@ export const ChatForm: React.FC<ChatFormProps> = ({ )} <Flex - ref={(x) => refs.setChat(x)} style={{ - // TODO: direction can be done with prop `direction` flexDirection: "column", alignSelf: "stretch", flex: 1, @@ -362,116 +493,128 @@ export const ChatForm: React.FC<ChatFormProps> = ({ {helpInfo} </Flex> )} - {shouldAgentCapabilitiesBeShown && <AgentCapabilities />} <Form disabled={disableSend} - className={classNames(styles.chatForm__form, className)} + className={classNames( + styles.chatForm, + styles.chatForm__form, + styles.chatFormMain, + className, + )} onSubmit={() => handleSubmit("after_flow")} > - <FilesPreview files={previewFiles} /> - - <ComboBox - onHelpClick={handleHelpCommand} - commands={commands} - requestCommandsCompletion={requestCompletion} - value={value} - onChange={handleChange} - onSubmit={(event) => { - handleEnter(event); - }} - placeholder={ - commands.completions.length < 1 ? 
"Type @ for commands" : "" - } - render={(props) => ( - <TextArea - data-testid="chat-form-textarea" - required={true} - // disabled={isStreaming} - {...props} - autoFocus={autoFocus} - style={{ boxShadow: "none", outline: "none" }} - onPaste={handlePastingFile} + <Box className={styles.textareaWrapper}> + <Box className={styles.inputHeader}> + <UnifiedAttachmentsTray + attachedFiles={attachedFiles} + previewFiles={previewFiles} + onOpenFile={queryPathThenOpenFile} /> - )} - /> - <Flex gap="2" wrap="wrap" py="1" px="2" align="center"> - {isModelSelectVisible && <CapsSelect />} - <ContextCapButton /> - - <Flex justify="end" flexGrow="1" wrap="wrap" gap="2"> - <ThinkingButton /> - <TokensPreview - currentMessageQuery={attachedFiles.addFilesToInput(value)} - /> - <Flex gap="2" align="center" justify="center"> - <IconButton - size="1" - variant="ghost" - color={ - lastSentCompression === "high" - ? "red" - : lastSentCompression === "medium" - ? "yellow" - : undefined - } - title="Summarize and continue in a new chat" - type="button" - onClick={() => void compressChat()} - disabled={ - messages.length === 0 || - isStreaming || - isWaiting || - unCalledTools - } - loading={compressChatRequest.isLoading || isCompressing} - > - <ArchiveIcon /> - </IconButton> - {toolUse === "agent" && ( - <AgentIntegrationsButton - title="Set up Agent Integrations" - size="1" - type="button" - onClick={handleAgentIntegrationsClick} - ref={(x) => refs.setSetupIntegrations(x)} - /> - )} - {onClose && ( - <BackToSideBarButton - disabled={isStreaming} - title="Return to sidebar" - size="1" - onClick={onClose} - /> - )} - {config.features?.images !== false && - isMultimodalitySupportedForCurrentModel && ( - <AttachImagesButton /> - )} - {/* TODO: Reserved space for microphone button coming later on */} - <SendButtonWithDropdown - disabled={ - !isOnline || allDisabled || value.trim().length === 0 - } - isStreaming={isStreaming || isWaiting} - queuedCount={queuedMessages.length} - onSend={() => handleSubmit("after_flow")} - onSendImmediately={handleSendImmediately} + <Flex align="center" gap="2" justify="between" wrap="wrap"> + <ChatInputTopControls + checkboxes={checkboxes} + onCheckedChange={onToggleCheckbox} + attachedFiles={attachedFiles} /> + <Flex align="center" gap="2"> + <StreamingTokenCounter /> + <UsageCounter /> + <TrajectoryButton /> + </Flex> </Flex> + </Box> + + <ComboBox + key={inputResetKey} + onHelpClick={handleHelpCommand} + commands={commands} + requestCommandsCompletion={requestCompletion} + value={ + isVoiceActive && liveTranscript + ? value.trim() + ? `${value}\n${liveTranscript}` + : liveTranscript + : value + } + onChange={handleChange} + onSubmit={(event) => { + handleEnter(event); + }} + placeholder={ + isVoiceActive + ? "Listening..." + : commands.completions.length < 1 + ? "Type @ for commands" + : "" + } + render={(props) => ( + <TextAreaWithChips + data-testid="chat-form-textarea" + required={true} + {...props} + host={host} + onOpenFile={queryPathThenOpenFile} + autoFocus={autoFocus} + readOnly={isVoiceActive} + style={{ boxShadow: "none", outline: "none" }} + onPaste={handlePastingFile} + /> + )} + /> + </Box> + <Flex gap="2" wrap="wrap" py="2" px="3" align="center"> + <ChatSettingsDropdown /> + <ModeSelect + selectedMode={threadMode ?? 
DEFAULT_MODE} + onModeChange={onSetMode} + disabled={isModeDisabled} + /> + + <Flex justify="end" flexGrow="1" wrap="wrap" gap="2" align="center"> + <AgentIntegrationsButton + title="Set up Agent Integrations" + onClick={handleAgentIntegrationsClick} + /> + {onClose && ( + <BackToSideBarButton + disabled={isStreaming} + title="Return to sidebar" + onClick={onClose} + /> + )} + {config.features?.images !== false && + isMultimodalitySupportedForCurrentModel && ( + <AttachImagesButton /> + )} + <MicrophoneButton + ref={microphoneRef} + onTranscript={(text) => { + setValue((prev) => { + if (prev.trim()) { + return `${prev}\n${text}`; + } + return text; + }); + }} + onLiveTranscript={handleLiveTranscript} + onRecordingChange={handleRecordingChange} + disabled={disableMicrophone} + /> + <UnifiedSendButton + disabled={isVoiceActive || !isOnline || allDisabled} + isStreaming={isStreaming || isWaiting} + hasText={value.trim().length > 0 || attachedImages.length > 0} + hasMessages={messages.length > 0} + queuedCount={queuedItems.length} + onSend={() => handleSubmit("after_flow")} + onSendImmediately={handleSendImmediately} + onStop={() => void abort()} + onResend={() => void regenerate()} + /> </Flex> </Flex> </Form> </Flex> - <FileList attachedFiles={attachedFiles} /> - - <ChatControls - // handle adding files - host={config.host} - checkboxes={checkboxes} - onCheckedChange={onToggleCheckbox} - attachedFiles={attachedFiles} - /> - </Card> + </Box> ); }; diff --git a/refact-agent/gui/src/components/ChatForm/ChatInputTopControls.module.css b/refact-agent/gui/src/components/ChatForm/ChatInputTopControls.module.css new file mode 100644 index 000000000..2679260e0 --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ChatInputTopControls.module.css @@ -0,0 +1,56 @@ +/* Use shared iconButton with variants */ +.iconButton { + composes: iconButton from "../shared/iconButton.module.css"; +} + +.active { + composes: active from "../shared/iconButton.module.css"; +} + +.danger { + composes: danger from "../shared/iconButton.module.css"; +} + +.divider { + color: var(--gray-8); + font-size: 12px; + user-select: none; + margin: 0 var(--space-1); +} + +.selectedLinesGroup { + height: 24px; +} + +.lockButton, +.helpButton { + display: flex; + align-items: center; + justify-content: center; + width: 20px; + height: 20px; + padding: 0; + border: none; + background: transparent; + color: var(--gray-10); + cursor: pointer; + transition: filter 0.15s ease; + flex-shrink: 0; +} + +.lockButton svg, +.helpButton svg { + width: 14px; + height: 14px; +} + +.lockButton:hover:not(:disabled), +.helpButton:hover:not(:disabled) { + filter: brightness(1.5); +} + +.lockButton:disabled, +.helpButton:disabled { + opacity: 0.4; + cursor: not-allowed; +} diff --git a/refact-agent/gui/src/components/ChatForm/ChatInputTopControls.tsx b/refact-agent/gui/src/components/ChatForm/ChatInputTopControls.tsx new file mode 100644 index 000000000..950e6a9aa --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ChatInputTopControls.tsx @@ -0,0 +1,228 @@ +import React, { useCallback, useState } from "react"; +import { Flex, Text, HoverCard } from "@radix-ui/themes"; +import { + InfoCircledIcon, + LockClosedIcon, + LockOpen1Icon, + QuestionMarkCircledIcon, + Pencil2Icon, + ExclamationTriangleIcon, + PlusIcon, +} from "@radix-ui/react-icons"; +import styles from "./ChatInputTopControls.module.css"; +import classNames from "classnames"; +import { useAppDispatch, useAppSelector } from "../../hooks"; +import { + 
selectAutoApproveEditingTools, + selectAutoApproveDangerousCommands, + selectCurrentThreadId, + selectIncludeProjectInfo, +} from "../../features/Chat"; +import { + setAutoApproveEditingTools, + setAutoApproveDangerousCommands, +} from "../../features/Chat/Thread/actions"; +import { ProjectInformationDialog } from "./ProjectInformationDialog"; +import { selectHost } from "../../features/Config/configSlice"; +import { Checkbox } from "../Checkbox"; +import type { Checkbox as CheckboxType } from "./useCheckBoxes"; +import type { useAttachedFiles } from "./useCheckBoxes"; + +export type ChatInputTopControlsProps = { + checkboxes: Record<string, CheckboxType>; + onCheckedChange: (name: string, checked: boolean | string) => void; + attachedFiles: ReturnType<typeof useAttachedFiles>; +}; + +export const ChatInputTopControls: React.FC<ChatInputTopControlsProps> = ({ + checkboxes, + onCheckedChange, + attachedFiles, +}) => { + const dispatch = useAppDispatch(); + const host = useAppSelector(selectHost); + const chatId = useAppSelector(selectCurrentThreadId); + const autoApproveEditing = useAppSelector(selectAutoApproveEditingTools); + const autoApproveDangerous = useAppSelector( + selectAutoApproveDangerousCommands, + ); + const includeProjectInfo = useAppSelector(selectIncludeProjectInfo); + const [dialogOpen, setDialogOpen] = useState(false); + + const handleEditingChange = useCallback( + (checked: boolean) => { + if (chatId) { + dispatch(setAutoApproveEditingTools({ chatId, value: checked })); + } + }, + [dispatch, chatId], + ); + + const handleDangerousChange = useCallback( + (checked: boolean) => { + if (chatId) { + dispatch(setAutoApproveDangerousCommands({ chatId, value: checked })); + } + }, + [dispatch, chatId], + ); + + const selectedLinesCheckbox = checkboxes.selected_lines; + const showSelectedLines = host !== "web" && !selectedLinesCheckbox.hide; + const showAttachButton = host !== "web" && attachedFiles.activeFile.name; + + return ( + <> + <Flex gap="1" align="center" wrap="wrap"> + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + onClick={() => setDialogOpen(true)} + aria-label="Configure project information" + className={classNames( + styles.iconButton, + includeProjectInfo && styles.active, + )} + > + <InfoCircledIcon /> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top"> + <Text as="p" size="2"> + Project info: {includeProjectInfo ? "ON" : "OFF"} + </Text> + </HoverCard.Content> + </HoverCard.Root> + + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + onClick={() => handleEditingChange(!autoApproveEditing)} + disabled={!chatId} + aria-label="Auto-approve file editing tools" + aria-pressed={autoApproveEditing} + className={classNames( + styles.iconButton, + autoApproveEditing && styles.active, + )} + > + <Pencil2Icon /> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top"> + <Text as="p" size="2"> + Auto-approve edits: {autoApproveEditing ? 
"ON" : "OFF"} + </Text> + </HoverCard.Content> + </HoverCard.Root> + + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + onClick={() => handleDangerousChange(!autoApproveDangerous)} + disabled={!chatId} + aria-label="Auto-approve dangerous commands" + aria-pressed={autoApproveDangerous} + className={classNames( + styles.iconButton, + autoApproveDangerous && styles.danger, + )} + > + <ExclamationTriangleIcon /> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top"> + <Text as="p" size="2"> + Auto-approve dangerous: {autoApproveDangerous ? "ON" : "OFF"} + </Text> + </HoverCard.Content> + </HoverCard.Root> + + {showSelectedLines && ( + <> + <span className={styles.divider}>|</span> + <Flex align="center" gap="1" className={styles.selectedLinesGroup}> + <Checkbox + size="1" + name={selectedLinesCheckbox.name} + checked={selectedLinesCheckbox.checked} + disabled={selectedLinesCheckbox.disabled} + onCheckedChange={(value) => + onCheckedChange(selectedLinesCheckbox.name, value) + } + > + <Text size="1">{selectedLinesCheckbox.label}</Text> + </Checkbox> + <button + type="button" + className={styles.lockButton} + onClick={() => + onCheckedChange( + selectedLinesCheckbox.name, + !selectedLinesCheckbox.checked, + ) + } + disabled={selectedLinesCheckbox.disabled} + aria-label={ + selectedLinesCheckbox.locked ? "Locked" : "Unlocked" + } + > + {selectedLinesCheckbox.locked && <LockClosedIcon />} + {selectedLinesCheckbox.locked === false && <LockOpen1Icon />} + </button> + {selectedLinesCheckbox.info && ( + <HoverCard.Root> + <HoverCard.Trigger> + <button type="button" className={styles.helpButton}> + <QuestionMarkCircledIcon /> + </button> + </HoverCard.Trigger> + <HoverCard.Content maxWidth="240px" size="1"> + <Text as="div" size="1"> + {selectedLinesCheckbox.info.text} + </Text> + </HoverCard.Content> + </HoverCard.Root> + )} + </Flex> + </> + )} + + {showAttachButton && ( + <> + <span className={styles.divider}>|</span> + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + onClick={attachedFiles.addFile} + disabled={attachedFiles.attached} + aria-label={`Attach ${attachedFiles.activeFile.name}`} + className={classNames( + styles.iconButton, + attachedFiles.attached && styles.active, + )} + > + <PlusIcon /> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top"> + <Text as="p" size="2"> + Attach: {attachedFiles.activeFile.name} + </Text> + </HoverCard.Content> + </HoverCard.Root> + </> + )} + </Flex> + + <ProjectInformationDialog + open={dialogOpen} + onOpenChange={setDialogOpen} + /> + </> + ); +}; diff --git a/refact-agent/gui/src/components/ChatForm/ChatSettingsDropdown.module.css b/refact-agent/gui/src/components/ChatForm/ChatSettingsDropdown.module.css new file mode 100644 index 000000000..28676947c --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ChatSettingsDropdown.module.css @@ -0,0 +1,259 @@ +.trigger { + display: flex; + align-items: center; + gap: var(--space-1); + padding: var(--space-1) var(--space-2); + background: transparent; + border: none; + border-radius: var(--radius-2); + cursor: pointer; + color: var(--gray-12); + transition: background-color 0.15s ease; +} + +.trigger:hover:not(.disabled) { + background-color: var(--gray-a3); +} + +.trigger.disabled { + opacity: 0.5; + cursor: default; +} + +.triggerContent { + max-width: 320px; +} + +.modelName { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + max-width: 140px; +} + +.chevron { + flex-shrink: 0; + color: 
var(--gray-9); + width: 12px; + height: 12px; +} + +.content { + min-width: 300px; + max-width: 400px; + max-height: 600px; + overflow: visible; + padding: var(--space-2); +} + +.contentAdvancedOpen { + max-height: 780px; +} + +.modelSection { + overflow: hidden; +} + +.section { + padding: var(--space-2) 0; +} + +.section:first-child { + padding-top: 0; +} + +.section:last-child { + padding-bottom: 0; +} + +.sectionHeader { + display: block; + text-transform: uppercase; + letter-spacing: 0.5px; + font-size: 10px; +} + +.groupHeader { + display: block; + padding: var(--space-1) var(--space-2); + font-size: 10px; + text-transform: uppercase; + letter-spacing: 0.3px; + opacity: 0.7; +} + +.groupSeparator { + margin: var(--space-1) 0; +} + +.modelList { + composes: scrollbarThin from "../shared/scrollbar.module.css"; + display: flex; + flex-direction: column; + max-height: 220px; + overflow-y: auto; +} + +/* When the outer popover grows, let the list grow too, + reducing the chance of nested scrollbars. */ +.contentAdvancedOpen .modelList { + max-height: 320px; +} + +.item { + display: flex; + align-items: center; + gap: var(--space-2); + padding: var(--space-1) var(--space-2); + background: transparent; + border: none; + border-radius: var(--radius-2); + cursor: pointer; + text-align: left; + width: 100%; + transition: background-color 0.15s ease; +} + +.item:hover:not(:disabled):not(.itemDisabled) { + background-color: var(--gray-a3); +} + +.item:disabled, +.item.itemDisabled { + opacity: 0.5; + cursor: default; +} + +.itemSelected { + background-color: var(--accent-a3); +} + +.itemSelected:hover:not(:disabled):not(.itemDisabled) { + background-color: var(--accent-a4); +} + +.itemModelName { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + font-family: monospace; + font-size: 11px; +} + +.badge { + font-size: 9px; + flex-shrink: 0; +} + +.sliderContainer { + padding: 0 var(--space-1); +} + +.slider { + flex: 1; +} + +.advancedTrigger { + display: flex; + align-items: center; + width: 100%; + padding: var(--space-2); + background: transparent; + border: none; + border-radius: var(--radius-2); + cursor: pointer; + color: var(--gray-11); + transition: background-color 0.15s ease; +} + +.advancedTrigger:hover:not(:disabled) { + background-color: var(--gray-a3); +} + +.advancedTrigger:disabled { + opacity: 0.5; + cursor: default; +} + +.advancedChevron { + width: 12px; + height: 12px; + transition: transform 0.15s ease; +} + +.advancedChevronOpen { + transform: rotate(90deg); +} + +.advancedContent { + padding: var(--space-2); + padding-top: 0; +} + +.advancedRow { + padding: var(--space-2) 0; +} + +.advancedRow:first-child { + padding-top: var(--space-1); +} + +.resetButton { + display: flex; + align-items: center; + justify-content: center; + width: 16px; + height: 16px; + padding: 0; + background: transparent; + border: none; + border-radius: var(--radius-1); + cursor: pointer; + color: var(--gray-9); + font-size: 10px; + line-height: 1; + transition: + background-color 0.15s ease, + color 0.15s ease; +} + +.resetButton:hover:not(:disabled) { + background-color: var(--gray-a4); + color: var(--gray-12); +} + +.resetButton:disabled { + opacity: 0.5; + cursor: default; +} + +.effortButton { + padding: var(--space-1) var(--space-2); + background: var(--gray-a3); + border: 1px solid var(--gray-a6); + border-radius: var(--radius-2); + cursor: pointer; + color: var(--gray-11); + transition: all 0.15s ease; +} + +.effortButton:hover:not(:disabled) { + background: 
var(--gray-a4); + border-color: var(--gray-a8); +} + +.effortButton:disabled { + opacity: 0.5; + cursor: default; +} + +.effortButtonActive { + background: var(--accent-a4); + border-color: var(--accent-a8); + color: var(--accent-11); +} + +.effortButtonActive:hover:not(:disabled) { + background: var(--accent-a5); + border-color: var(--accent-a9); +} diff --git a/refact-agent/gui/src/components/ChatForm/ChatSettingsDropdown.tsx b/refact-agent/gui/src/components/ChatForm/ChatSettingsDropdown.tsx new file mode 100644 index 000000000..8230bad7c --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ChatSettingsDropdown.tsx @@ -0,0 +1,776 @@ +import React, { + useCallback, + useMemo, + useState, + useRef, + useEffect, +} from "react"; +import { + Flex, + Text, + Popover, + Separator, + Skeleton, + Slider, + Badge, + Switch, + Callout, +} from "@radix-ui/themes"; +import { ChevronDownIcon, ChevronRightIcon } from "@radix-ui/react-icons"; +import * as Collapsible from "@radix-ui/react-collapsible"; +import { useAppSelector, useAppDispatch, useCapsForToolUse } from "../../hooks"; +import { useGetCapsQuery, CapCost } from "../../services/refact/caps"; +import { + selectChatId, + selectContextTokensCap, + selectModel, + selectMessages, + selectIsStreaming, + selectIsWaiting, + selectThreadBoostReasoning, + selectReasoningEffort, + selectThinkingBudget, + selectTemperature, + selectMaxTokens, + setContextTokensCap, + setReasoningEffort, + setThinkingBudget, + setTemperature, + setMaxTokens, +} from "../../features/Chat/Thread"; +import type { ReasoningEffort } from "../../features/Chat/Thread/types"; +import { push } from "../../features/Pages/pagesSlice"; +import { enrichAndGroupModels } from "../../utils/enrichModels"; +import { useThinking } from "../../hooks/useThinking"; +import { formatContextWindow } from "../../features/Providers/ProviderForm/ProviderModelsList/utils/groupModelsWithPricing"; +import styles from "./ChatSettingsDropdown.module.css"; + +const CAP_STEPS = [16000, 32000, 64000, 128000, 200000, 256000]; +const MIN_CAP = 16000; + +function formatTokens(tokens: number): string { + if (tokens >= 1000000) { + return `${(tokens / 1000000).toFixed(tokens % 1000000 === 0 ? 
0 : 1)}M`; + } + return `${Math.round(tokens / 1000)}K`; +} + +function formatUsdPrice(price: number | undefined): string { + if (typeof price !== "number" || !Number.isFinite(price)) return "–"; + if (price >= 100) { + return `$${price.toFixed(0)}`; + } + if (price >= 10) { + return `$${price.toFixed(1)}`; + } + return `$${price.toFixed(2)}`; +} + +function formatPricingDetailed(cost: CapCost): { + prompt: string; + output: string; +} { + return { + prompt: formatUsdPrice(cost.prompt), + output: formatUsdPrice(cost.generated), + }; +} + +function getSliderSteps(maxTokens: number): number[] { + const steps = CAP_STEPS.filter((s) => s <= maxTokens); + if (!steps.includes(maxTokens)) { + steps.push(maxTokens); + } + return steps.sort((a, b) => a - b); +} + +function valueToSliderPosition(value: number, steps: number[]): number { + const idx = steps.findIndex((s) => s >= value); + if (idx === -1) return steps.length - 1; + if (steps[idx] === value) return idx; + if (idx === 0) return 0; + const prev = steps[idx - 1]; + const next = steps[idx]; + const ratio = (value - prev) / (next - prev); + return idx - 1 + ratio; +} + +function sliderPositionToValue(position: number, steps: number[]): number { + const idx = Math.floor(position); + if (idx >= steps.length - 1) return steps[steps.length - 1]; + const frac = position - idx; + if (frac === 0) return steps[idx]; + return Math.round(steps[idx] + frac * (steps[idx + 1] - steps[idx])); +} + +export const ChatSettingsDropdown: React.FC = () => { + const dispatch = useAppDispatch(); + const chatId = useAppSelector(selectChatId); + const isStreaming = useAppSelector(selectIsStreaming); + const isWaiting = useAppSelector(selectIsWaiting); + const contextCap = useAppSelector(selectContextTokensCap); + const threadModel = useAppSelector(selectModel); + const messages = useAppSelector(selectMessages); + const isBoostReasoningEnabled = useAppSelector(selectThreadBoostReasoning); + const threadTemperature = useAppSelector(selectTemperature); + const threadMaxTokens = useAppSelector(selectMaxTokens); + const threadReasoningEffort = useAppSelector(selectReasoningEffort); + const threadThinkingBudget = useAppSelector(selectThinkingBudget); + const hasAnyReasoningConfigured = + (isBoostReasoningEnabled ?? 
false) || + threadReasoningEffort != null || + threadThinkingBudget != null; + + const caps = useCapsForToolUse(); + const capsQuery = useGetCapsQuery(undefined); + + const { + handleReasoningChange, + shouldBeDisabled: thinkingDisabled, + supportsBoostReasoning, + areCapsInitialized, + } = useThinking(); + + const isInteractionDisabled = isStreaming || isWaiting; + + // Model data + const currentModelName = caps.currentModel || "Select model"; + const [isOpen, setIsOpen] = useState(false); + const [advancedOpen, setAdvancedOpen] = useState(false); + const selectedModelRef = useRef<HTMLButtonElement>(null); + const modelListRef = useRef<HTMLDivElement>(null); + + const groupedModels = useMemo(() => { + return enrichAndGroupModels(caps.usableModelsForPlan, caps.data); + }, [caps.usableModelsForPlan, caps.data]); + + useEffect(() => { + if (!isOpen) return; + + const scrollToSelected = () => { + const container = modelListRef.current; + const selected = selectedModelRef.current; + if (container && selected && container.clientHeight > 0) { + const containerHeight = container.clientHeight; + const selectedTop = selected.offsetTop; + const selectedHeight = selected.offsetHeight; + container.scrollTop = + selectedTop - containerHeight / 2 + selectedHeight / 2; + return true; + } + return false; + }; + + let attempts = 0; + const maxAttempts = 10; + const tryScroll = () => { + if (scrollToSelected() || attempts >= maxAttempts) return; + attempts++; + requestAnimationFrame(tryScroll); + }; + + requestAnimationFrame(tryScroll); + }, [isOpen]); + + const selectedModelDetail = useMemo(() => { + if (!caps.currentModel) return null; + const data = capsQuery.data; + if (!data?.chat_models) return null; + const modelData = data.chat_models[caps.currentModel] as + | { + n_ctx: number; + default_temperature?: number; + default_max_tokens?: number; + max_output_tokens?: number; + reasoning_effort_options?: string[] | null; + supports_thinking_budget?: boolean; + supports_adaptive_thinking_budget?: boolean; + } + | undefined; + if (!modelData) return null; + const pricing = + data.metadata?.pricing?.[caps.currentModel.replace(/^refact\//, "")]; + return { + nCtx: modelData.n_ctx, + defaultTemperature: modelData.default_temperature, + defaultMaxTokens: modelData.default_max_tokens, + maxOutputTokens: modelData.max_output_tokens, + reasoningEffortOptions: modelData.reasoning_effort_options, + supportsThinkingBudget: modelData.supports_thinking_budget, + supportsAdaptiveThinkingBudget: + modelData.supports_adaptive_thinking_budget, + pricing: pricing ? formatPricingDetailed(pricing) : null, + }; + }, [caps.currentModel, capsQuery.data]); + + const maxTokens = useMemo(() => { + const chatModels = capsQuery.data?.chat_models; + if (!chatModels || !threadModel) return 0; + if (!Object.prototype.hasOwnProperty.call(chatModels, threadModel)) + return 0; + return chatModels[threadModel].n_ctx; + }, [capsQuery.data, threadModel]); + + const sliderSteps = useMemo(() => getSliderSteps(maxTokens), [maxTokens]); + + const effectiveCap = useMemo(() => { + if (!contextCap || contextCap > maxTokens) return maxTokens; + if (contextCap < MIN_CAP) return MIN_CAP; + return contextCap; + }, [contextCap, maxTokens]); + + const [localSliderValue, setLocalSliderValue] = useState<number | null>(null); + const displayCap = localSliderValue ?? 
effectiveCap; + + const [localTemperature, setLocalTemperature] = useState<number | null>(null); + const [localThinkingBudget, setLocalThinkingBudget] = useState<number | null>( + null, + ); + const [localMaxTokens, setLocalMaxTokens] = useState<number | null>(null); + const displayTemperature = localTemperature ?? threadTemperature; + const displayThinkingBudget = localThinkingBudget ?? threadThinkingBudget; + const displayMaxTokens = localMaxTokens ?? threadMaxTokens; + + const isStartedChat = messages.length > 0; + + useEffect(() => { + setLocalSliderValue(null); + setLocalTemperature(null); + setLocalThinkingBudget(null); + setLocalMaxTokens(null); + }, [chatId]); + + useEffect(() => { + if (!isOpen) { + setLocalSliderValue(null); + setLocalTemperature(null); + setLocalThinkingBudget(null); + setLocalMaxTokens(null); + } + }, [isOpen]); + + // Handlers + const handleModelSelect = useCallback( + (modelValue: string) => { + if (modelValue === "add-new-model") { + dispatch(push({ name: "providers page" })); + return; + } + caps.setCapModel(modelValue); + }, + [caps, dispatch], + ); + + const handleSliderChange = useCallback( + (values: number[]) => { + const newValue = sliderPositionToValue(values[0], sliderSteps); + setLocalSliderValue(newValue); + }, + [sliderSteps], + ); + + const handleSliderCommit = useCallback( + (values: number[]) => { + const newValue = sliderPositionToValue(values[0], sliderSteps); + dispatch(setContextTokensCap({ chatId, value: newValue })); + setLocalSliderValue(null); + }, + [dispatch, chatId, sliderSteps], + ); + + const noop = useCallback(() => { + /* intentionally empty */ + }, []); + const handleThinkingToggle = useCallback( + (checked: boolean) => { + handleReasoningChange( + { + preventDefault: noop, + stopPropagation: noop, + } as unknown as React.MouseEvent<HTMLButtonElement>, + checked, + ); + + if (checked) { + // Reasoning requires temperature to be unset (None). + // Dispatch explicitly so the setTemperature middleware + persistence + // listeners fire, keeping Redux, backend, and localStorage in sync. + dispatch(setTemperature({ chatId, value: null })); + } else { + // Ensure "Reasoning" toggle truly controls reasoning. + // Backend treats `reasoning_effort` / `thinking_budget` as enabling reasoning + // even if `boost_reasoning` is turned off. + dispatch(setReasoningEffort({ chatId, value: null })); + dispatch(setThinkingBudget({ chatId, value: null })); + } + }, + [handleReasoningChange, noop, dispatch, chatId], + ); + + const handleTemperatureChange = useCallback((values: number[]) => { + setLocalTemperature(values[0]); + }, []); + + const handleTemperatureCommit = useCallback( + (values: number[]) => { + if (hasAnyReasoningConfigured) { + // UI should be disabled already, but keep commit a no-op defensively. 
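+        // Clearing the local preview lets displayTemperature fall back to the thread's committed value.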
+ setLocalTemperature(null); + return; + } + dispatch(setTemperature({ chatId, value: values[0] })); + setLocalTemperature(null); + }, + [dispatch, chatId, hasAnyReasoningConfigured], + ); + + const handleTemperatureReset = useCallback(() => { + if (hasAnyReasoningConfigured) return; + dispatch(setTemperature({ chatId, value: null })); + setLocalTemperature(null); + }, [dispatch, chatId, hasAnyReasoningConfigured]); + + const handleMaxTokensReset = useCallback(() => { + dispatch(setMaxTokens({ chatId, value: null })); + setLocalMaxTokens(null); + }, [dispatch, chatId]); + + // Loading state + if (caps.loading || !areCapsInitialized) { + return ( + <Skeleton> + <div className={styles.trigger}> + <Text size="1">Loading...</Text> + <ChevronDownIcon /> + </div> + </Skeleton> + ); + } + + // Trigger display + const triggerContent = ( + <Flex align="center" gap="1" className={styles.triggerContent}> + <Text size="1" className={styles.modelName}> + {currentModelName} + </Text> + {maxTokens > 0 && ( + <> + <Text size="1" color="gray"> + · + </Text> + <Text size="1" color="gray"> + {formatTokens(effectiveCap)} + </Text> + </> + )} + {supportsBoostReasoning && isBoostReasoningEnabled && ( + <> + <Text size="1" color="gray"> + · + </Text> + <Text size="1">🧠</Text> + </> + )} + <ChevronDownIcon className={styles.chevron} /> + </Flex> + ); + + return ( + <Popover.Root open={isOpen} onOpenChange={setIsOpen}> + <Popover.Trigger> + <button + className={`${styles.trigger} ${ + isInteractionDisabled ? styles.disabled : "" + }`} + disabled={isInteractionDisabled} + type="button" + > + {triggerContent} + </button> + </Popover.Trigger> + + <Popover.Content + className={`${styles.content} ${ + advancedOpen ? styles.contentAdvancedOpen : "" + }`} + side="top" + align="start" + sideOffset={8} + > + {/* Model Section */} + <div className={`${styles.section} ${styles.modelSection}`}> + <div className={styles.modelList} ref={modelListRef}> + {groupedModels.map((group, groupIndex) => ( + <React.Fragment key={group.provider}> + {groupIndex > 0 && ( + <Separator size="4" className={styles.groupSeparator} /> + )} + <Text size="1" color="gray" className={styles.groupHeader}> + {group.displayName} + </Text> + {group.models.map((model) => { + const isSelected = caps.currentModel === model.value; + return ( + <button + key={model.value} + ref={isSelected ? selectedModelRef : undefined} + className={`${styles.item} ${ + isSelected ? styles.itemSelected : "" + } ${model.disabled ? 
styles.itemDisabled : ""}`} + onClick={() => handleModelSelect(model.value)} + disabled={isInteractionDisabled || model.disabled} + type="button" + > + <Flex align="center" gap="1"> + <Text + size="1" + weight="medium" + className={styles.itemModelName} + > + {model.value} + </Text> + {model.isDefault && ( + <Badge + size="1" + color="blue" + variant="soft" + className={styles.badge} + > + Default + </Badge> + )} + {model.isThinking && ( + <Badge + size="1" + color="purple" + variant="soft" + className={styles.badge} + > + Reasoning + </Badge> + )} + </Flex> + </button> + ); + })} + </React.Fragment> + ))} + <Separator size="4" className={styles.groupSeparator} /> + <button + className={styles.item} + onClick={() => handleModelSelect("add-new-model")} + type="button" + > + <Text size="1">Add new model...</Text> + </button> + </div> + </div> + + {/* Model Details */} + {selectedModelDetail && + (selectedModelDetail.nCtx || selectedModelDetail.pricing) && ( + <> + <Separator size="4" /> + <Flex gap="2" align="center" px="2" py="1"> + {selectedModelDetail.nCtx && ( + <Text size="1" color="gray"> + {formatContextWindow(selectedModelDetail.nCtx)} context + </Text> + )} + {selectedModelDetail.pricing && ( + <> + <Text size="1" color="gray"> + · + </Text> + <Text size="1" color="gray"> + {selectedModelDetail.pricing.prompt}/ + {selectedModelDetail.pricing.output} per 1M tokens + </Text> + </> + )} + </Flex> + </> + )} + + <Separator size="4" /> + + {/* Context Cap Section with Slider */} + {sliderSteps.length > 1 && ( + <> + <div className={styles.section}> + <Flex justify="between" align="center" mb="2"> + <Text + size="1" + color="gray" + weight="medium" + className={styles.sectionHeader} + > + Context window + </Text> + <Text size="1" weight="medium"> + {formatTokens(displayCap)} + {displayCap === maxTokens && " (max)"} + </Text> + </Flex> + <Flex align="center" gap="2" className={styles.sliderContainer}> + <Text size="1" color="gray"> + {formatTokens(MIN_CAP)} + </Text> + <Slider + size="1" + min={0} + max={sliderSteps.length - 1} + step={0.01} + value={[valueToSliderPosition(displayCap, sliderSteps)]} + onValueChange={handleSliderChange} + onValueCommit={handleSliderCommit} + disabled={isInteractionDisabled} + className={styles.slider} + /> + <Text size="1" color="gray"> + {formatTokens(maxTokens)} + </Text> + </Flex> + </div> + <Separator size="4" /> + </> + )} + + {/* Thinking Section */} + {supportsBoostReasoning && ( + <div className={styles.section}> + <Flex align="center" justify="between" gap="3"> + <Flex align="center" gap="1"> + <Text size="1">🧠</Text> + <Text size="1" weight="medium"> + Reasoning + </Text> + </Flex> + <Switch + size="1" + checked={isBoostReasoningEnabled} + onCheckedChange={handleThinkingToggle} + disabled={thinkingDisabled} + /> + </Flex> + + {isStartedChat && ( + <Callout.Root color="amber" size="1" mt="2"> + <Callout.Text> + Changing reasoning mid-chat may break prompt caching (if + enabled) and make the next turn much more expensive. 
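+                  {/* Shown only when the thread already has messages (isStartedChat). */}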
+ </Callout.Text> + </Callout.Root> + )} + + {isBoostReasoningEnabled && selectedModelDetail && ( + <> + {/* Reasoning effort options (transparent) */} + {selectedModelDetail.reasoningEffortOptions && + selectedModelDetail.reasoningEffortOptions.length > 0 && ( + <Flex align="center" justify="between" gap="2" mt="2"> + <Text size="1" color="gray"> + Effort + </Text> + <Flex gap="1"> + {selectedModelDetail.reasoningEffortOptions.map( + (level) => ( + <button + key={level} + type="button" + className={`${styles.effortButton} ${ + (threadReasoningEffort ?? "medium") === level + ? styles.effortButtonActive + : "" + }`} + onClick={() => + dispatch( + setReasoningEffort({ + chatId, + value: level as ReasoningEffort, + }), + ) + } + disabled={isInteractionDisabled} + > + <Text size="1">{level}</Text> + </button> + ), + )} + </Flex> + </Flex> + )} + {/* Thinking budget slider */} + {selectedModelDetail.supportsThinkingBudget && ( + <Flex direction="column" gap="1" mt="2"> + <Flex align="center" justify="between"> + <Text size="1" color="gray"> + Thinking tokens + </Text> + <Text size="1" weight="medium"> + {displayThinkingBudget ?? 16384} + </Text> + </Flex> + <Flex align="center" gap="2"> + <Text size="1" color="gray"> + 1K + </Text> + <Slider + size="1" + min={1024} + max={32768} + step={1024} + value={[displayThinkingBudget ?? 16384]} + onValueChange={(values) => + setLocalThinkingBudget(values[0]) + } + onValueCommit={(values) => { + dispatch( + setThinkingBudget({ chatId, value: values[0] }), + ); + setLocalThinkingBudget(null); + }} + disabled={isInteractionDisabled} + /> + <Text size="1" color="gray"> + 32K + </Text> + </Flex> + </Flex> + )} + </> + )} + </div> + )} + + <Separator size="4" /> + + {/* Advanced Settings Section */} + <Collapsible.Root open={advancedOpen} onOpenChange={setAdvancedOpen}> + <Collapsible.Trigger asChild> + <button + className={styles.advancedTrigger} + type="button" + disabled={isInteractionDisabled} + > + <Flex align="center" gap="1"> + <ChevronRightIcon + className={`${styles.advancedChevron} ${ + advancedOpen ? styles.advancedChevronOpen : "" + }`} + /> + <Text size="1" weight="medium"> + Advanced settings + </Text> + </Flex> + </button> + </Collapsible.Trigger> + <Collapsible.Content> + <div className={styles.advancedContent}> + {/* Temperature */} + <div className={styles.advancedRow}> + <Flex justify="between" align="center" mb="1"> + <Text size="1" color="gray"> + Temperature + </Text> + <Flex align="center" gap="2"> + <Text size="1" weight="medium"> + {hasAnyReasoningConfigured + ? "None" + : displayTemperature?.toFixed(1) ?? + (selectedModelDetail?.defaultTemperature?.toFixed( + 1, + ) ?? "0.7") + " (default)"} + </Text> + {threadTemperature != null && ( + <button + type="button" + className={styles.resetButton} + onClick={handleTemperatureReset} + disabled={ + isInteractionDisabled || hasAnyReasoningConfigured + } + > + ✕ + </button> + )} + </Flex> + </Flex> + <Slider + size="1" + min={0} + max={2} + step={0.1} + value={[ + displayTemperature ?? + selectedModelDetail?.defaultTemperature ?? + 0.7, + ]} + onValueChange={handleTemperatureChange} + onValueCommit={handleTemperatureCommit} + disabled={isInteractionDisabled || hasAnyReasoningConfigured} + /> + </div> + + {/* Max Tokens */} + <div className={styles.advancedRow}> + <Flex justify="between" align="center" mb="1"> + <Text size="1" color="gray"> + Max tokens + </Text> + <Flex align="center" gap="2"> + <Text size="1" weight="medium"> + {displayMaxTokens ?? + (selectedModelDetail?.defaultMaxTokens + ? 
`${selectedModelDetail.defaultMaxTokens} (default)` + : "4096 (default)")} + </Text> + {threadMaxTokens != null && ( + <button + type="button" + className={styles.resetButton} + onClick={handleMaxTokensReset} + disabled={isInteractionDisabled} + > + ✕ + </button> + )} + </Flex> + </Flex> + <Flex align="center" gap="2"> + <Text size="1" color="gray"> + 1K + </Text> + <Slider + size="1" + min={1024} + max={selectedModelDetail?.maxOutputTokens ?? 16384} + step={1024} + value={[ + displayMaxTokens ?? + selectedModelDetail?.defaultMaxTokens ?? + 4096, + ]} + onValueChange={(values) => setLocalMaxTokens(values[0])} + onValueCommit={(values) => { + dispatch(setMaxTokens({ chatId, value: values[0] })); + setLocalMaxTokens(null); + }} + disabled={isInteractionDisabled} + /> + <Text size="1" color="gray"> + {formatTokens( + selectedModelDetail?.maxOutputTokens ?? 16384, + )} + </Text> + </Flex> + </div> + </div> + </Collapsible.Content> + </Collapsible.Root> + </Popover.Content> + </Popover.Root> + ); +}; + +ChatSettingsDropdown.displayName = "ChatSettingsDropdown"; diff --git a/refact-agent/gui/src/components/ChatForm/FilesPreview.tsx b/refact-agent/gui/src/components/ChatForm/FilesPreview.tsx deleted file mode 100644 index 73ed3fad7..000000000 --- a/refact-agent/gui/src/components/ChatForm/FilesPreview.tsx +++ /dev/null @@ -1,58 +0,0 @@ -import React from "react"; -import { Box } from "@radix-ui/themes"; -import { Text, TruncateLeft } from "../Text"; -import { ChatContextFile } from "../../services/refact"; -import styles from "./ChatForm.module.css"; - -const FileNameAndContent: React.FC<{ - title: string; - children: React.ReactNode; -}> = ({ title, children }) => { - return ( - <pre className={styles.file}> - <Text size="1" title={title} className={styles.file_name}> - {children} - </Text> - </pre> - ); -}; - -const Preview: React.FC<{ file: string | ChatContextFile }> = ({ file }) => { - if (typeof file === "string") { - return ( - <FileNameAndContent title={file}> - 📄&nbsp;<TruncateLeft>plain text</TruncateLeft> - </FileNameAndContent> - ); - } - - const lineText = - file.line1 !== 0 && file.line2 !== 0 && `:${file.line1}-${file.line2}`; - - return ( - <FileNameAndContent title={file.file_content}> - 📎&nbsp; - <TruncateLeft> - {file.file_name} - {lineText} - </TruncateLeft> - </FileNameAndContent> - ); -}; - -export const FilesPreview: React.FC<{ - files?: (ChatContextFile | string)[]; -}> = ({ files }) => { - if (!files || files.length === 0) return null; - return ( - <Box p="2" pb="0"> - {files.map((file, i) => { - const key = - typeof file === "string" - ? 
`plain-text-preview-${i}` - : `file-preview-${i}-${file.file_name}`; - return <Preview key={key} file={file} />; - })} - </Box> - ); -}; diff --git a/refact-agent/gui/src/components/ChatForm/MicrophoneButton.module.css b/refact-agent/gui/src/components/ChatForm/MicrophoneButton.module.css new file mode 100644 index 000000000..1b688ac60 --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/MicrophoneButton.module.css @@ -0,0 +1,62 @@ +.iconButton { + display: flex; + align-items: center; + justify-content: center; + width: 24px; + height: 24px; + padding: 0; + border: none; + background: transparent; + color: var(--gray-11); + cursor: pointer; + transition: filter 0.15s ease; + flex-shrink: 0; +} + +.iconButton svg { + width: 15px; + height: 15px; +} + +.iconButton:hover:not(:disabled) { + filter: brightness(1.5); +} + +.iconButton:disabled { + opacity: 0.4; + cursor: not-allowed; +} + +.iconButton.active { + color: var(--red-11); +} + +.recording { + animation: micPulse 1s ease-in-out infinite; +} + +.finishing { + animation: micBlink 0.5s ease-in-out infinite; +} + +@keyframes micPulse { + 0%, + 100% { + transform: scale(1); + opacity: 1; + } + 50% { + transform: scale(1.15); + opacity: 0.8; + } +} + +@keyframes micBlink { + 0%, + 100% { + opacity: 1; + } + 50% { + opacity: 0.4; + } +} diff --git a/refact-agent/gui/src/components/ChatForm/MicrophoneButton.tsx b/refact-agent/gui/src/components/ChatForm/MicrophoneButton.tsx new file mode 100644 index 000000000..3c7ba2cb8 --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/MicrophoneButton.tsx @@ -0,0 +1,128 @@ +import React, { + useEffect, + useRef, + useImperativeHandle, + forwardRef, +} from "react"; +import { Spinner, HoverCard, Text } from "@radix-ui/themes"; +import classNames from "classnames"; +import { useVoiceInput } from "../../hooks/useVoiceInput"; +import { useAppDispatch } from "../../hooks"; +import { setError } from "../../features/Errors/errorsSlice"; +import styles from "./MicrophoneButton.module.css"; + +interface MicrophoneButtonProps { + onTranscript: (text: string) => void; + onLiveTranscript?: (text: string) => void; + onRecordingChange?: (isRecording: boolean, isFinishing: boolean) => void; + disabled?: boolean; +} + +export interface MicrophoneButtonRef { + toggleRecording: () => Promise<string | null>; +} + +export const MicrophoneButton = forwardRef< + MicrophoneButtonRef, + MicrophoneButtonProps +>(({ onTranscript, onLiveTranscript, onRecordingChange, disabled }, ref) => { + const dispatch = useAppDispatch(); + const { + isRecording, + isFinishing, + isDownloading, + voiceEnabled, + error, + liveTranscript, + toggleRecording, + } = useVoiceInput(onTranscript); + + const prevTranscriptRef = useRef(liveTranscript); + const prevRecordingRef = useRef(isRecording); + const prevFinishingRef = useRef(isFinishing); + + useImperativeHandle( + ref, + () => ({ + toggleRecording, + }), + [toggleRecording], + ); + + useEffect(() => { + if (error) { + dispatch(setError(error)); + } + }, [error, dispatch]); + + useEffect(() => { + if ( + isRecording !== prevRecordingRef.current || + isFinishing !== prevFinishingRef.current + ) { + prevRecordingRef.current = isRecording; + prevFinishingRef.current = isFinishing; + onRecordingChange?.(isRecording, isFinishing); + } + }, [isRecording, isFinishing, onRecordingChange]); + + useEffect(() => { + if (liveTranscript !== prevTranscriptRef.current) { + prevTranscriptRef.current = liveTranscript; + onLiveTranscript?.(liveTranscript); + } + }, [liveTranscript, 
onLiveTranscript]); + + if (!voiceEnabled) { + return null; + } + + const isActive = isRecording || isFinishing; + + return ( + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + disabled={!!disabled || isDownloading || isFinishing} + onClick={() => void toggleRecording()} + className={classNames( + styles.iconButton, + isActive && styles.active, + isRecording && styles.recording, + isFinishing && styles.finishing, + )} + aria-label="Voice input" + > + {isDownloading ? <Spinner size="1" /> : <MicrophoneIcon />} + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top"> + <Text as="p" size="2"> + Voice input (Ctrl+Shift+Space) + </Text> + </HoverCard.Content> + </HoverCard.Root> + ); +}); + +MicrophoneButton.displayName = "MicrophoneButton"; + +const MicrophoneIcon: React.FC = () => ( + <svg + width="15" + height="15" + viewBox="0 0 15 15" + fill="none" + xmlns="http://www.w3.org/2000/svg" + > + <path + d="M7.5 1C6.11929 1 5 2.11929 5 3.5V7.5C5 8.88071 6.11929 10 7.5 10C8.88071 10 10 8.88071 10 7.5V3.5C10 2.11929 8.88071 1 7.5 1Z" + fill="currentColor" + /> + <path + d="M3 6.5C3.27614 6.5 3.5 6.72386 3.5 7V7.5C3.5 9.70914 5.29086 11.5 7.5 11.5C9.70914 11.5 11.5 9.70914 11.5 7.5V7C11.5 6.72386 11.7239 6.5 12 6.5C12.2761 6.5 12.5 6.72386 12.5 7V7.5C12.5 10.0376 10.5376 12.1 8 12.4649V14H10C10.2761 14 10.5 14.2239 10.5 14.5C10.5 14.7761 10.2761 15 10 15H5C4.72386 15 4.5 14.7761 4.5 14.5C4.5 14.2239 4.72386 14 5 14H7V12.4649C4.46243 12.1 2.5 10.0376 2.5 7.5V7C2.5 6.72386 2.72386 6.5 3 6.5Z" + fill="currentColor" + /> + </svg> +); diff --git a/refact-agent/gui/src/components/ChatForm/ModeSelect.module.css b/refact-agent/gui/src/components/ChatForm/ModeSelect.module.css new file mode 100644 index 000000000..c1fd52a0f --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ModeSelect.module.css @@ -0,0 +1,100 @@ +.trigger { + display: flex; + align-items: center; + gap: var(--space-1); + padding: var(--space-1) var(--space-2); + background: transparent; + border: none; + border-radius: var(--radius-2); + cursor: pointer; + color: var(--gray-12); + transition: background-color 0.15s ease; +} + +.trigger:hover:not(.disabled) { + background-color: var(--gray-a3); +} + +.trigger.disabled { + opacity: 0.5; + cursor: default; +} + +.triggerContent { + max-width: 180px; +} + +.content { + min-width: 280px; + max-width: 360px; + padding: var(--space-2); +} + +.modeList { + composes: scrollbarThin from "../shared/scrollbar.module.css"; + display: flex; + flex-direction: column; + max-height: 320px; + overflow-y: auto; +} + +.separator { + margin: var(--space-1) 0; +} + +.item { + display: flex; + align-items: flex-start; + padding: var(--space-2); + background: transparent; + border: none; + border-radius: var(--radius-2); + cursor: pointer; + text-align: left; + width: 100%; + transition: background-color 0.15s ease; +} + +.item:hover:not(:disabled):not(.itemDisabled) { + background-color: var(--gray-a3); +} + +.item:disabled, +.item.itemDisabled { + opacity: 0.5; + cursor: default; +} + +.itemSelected { + background-color: var(--accent-a3); +} + +.itemSelected:hover:not(:disabled):not(.itemDisabled) { + background-color: var(--accent-a4); +} + +.addModeItem { + display: flex; + align-items: center; + padding: var(--space-2); + background: transparent; + border: none; + border-radius: var(--radius-2); + cursor: pointer; + text-align: left; + width: 100%; + transition: background-color 0.15s ease; +} + +.addModeItem:hover { + background-color: var(--gray-a3); +} + 
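+/* Truncated mode description rendered under each mode title in the list. */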
+.description { + font-size: 11px; + line-height: 1.3; +} + +.badge { + font-size: 10px; +} diff --git a/refact-agent/gui/src/components/ChatForm/ModeSelect.tsx b/refact-agent/gui/src/components/ChatForm/ModeSelect.tsx new file mode 100644 index 000000000..db30f9986 --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ModeSelect.tsx @@ -0,0 +1,304 @@ +import React, { useRef, useEffect, useState, useCallback } from "react"; +import { + Flex, + Text, + Badge, + Skeleton, + Popover, + Separator, +} from "@radix-ui/themes"; +import { + useGetChatModesQuery, + ChatModeInfo, + ChatModeThreadDefaults, +} from "../../services/refact/chatModes"; +import { DEFAULT_MODE } from "../../features/Chat/Thread/types"; +import { useAppSelector, useAppDispatch } from "../../hooks"; +import { + selectMessages, + selectCurrentThreadId, +} from "../../features/Chat/Thread"; +import { push } from "../../features/Pages/pagesSlice"; +import { ModeTransitionDialog } from "./ModeTransitionDialog"; +import styles from "./ModeSelect.module.css"; + +type ModeSelectProps = { + selectedMode: string; + onModeChange: ( + modeId: string, + threadDefaults?: ChatModeThreadDefaults, + ) => void; + disabled?: boolean; +}; + +export const ModeSelect: React.FC<ModeSelectProps> = ({ + selectedMode, + onModeChange, + disabled, +}) => { + const dispatch = useAppDispatch(); + const { data, isLoading, isError } = useGetChatModesQuery(undefined); + const messages = useAppSelector(selectMessages); + const currentChatId = useAppSelector(selectCurrentThreadId); + + const modes = data?.modes ?? []; + const effectiveMode = selectedMode || DEFAULT_MODE; + const currentMode = modes.find((m) => m.id === effectiveMode); + const currentTitle = currentMode?.title ?? effectiveMode; + const toolsCount = currentMode?.tools_count ?? 0; + + // Mode transition is needed when there are messages + const hasMessages = messages.length > 0; + const isModeDisabled = disabled ?? 
false; + + const [isOpen, setIsOpen] = useState(false); + const [transitionDialogOpen, setTransitionDialogOpen] = useState(false); + const [targetModeForTransition, setTargetModeForTransition] = + useState<ChatModeInfo | null>(null); + const selectedModeRef = useRef<HTMLButtonElement>(null); + const modeListRef = useRef<HTMLDivElement>(null); + + const handleModeSelect = useCallback( + (mode: ChatModeInfo) => { + if (hasMessages) { + // Open transition dialog for mode switch with context (including self-switch) + setTargetModeForTransition(mode); + setTransitionDialogOpen(true); + setIsOpen(false); + } else { + // Direct mode change (no messages) + onModeChange(mode.id, mode.thread_defaults); + setIsOpen(false); + } + }, + [hasMessages, onModeChange], + ); + + const handleTransitionDialogClose = useCallback((open: boolean) => { + setTransitionDialogOpen(open); + if (!open) { + setTargetModeForTransition(null); + } + }, []); + + useEffect(() => { + if (!isOpen) return; + + const scrollToSelected = () => { + const container = modeListRef.current; + const selected = selectedModeRef.current; + if (container && selected && container.clientHeight > 0) { + const containerHeight = container.clientHeight; + const selectedTop = selected.offsetTop; + const selectedHeight = selected.offsetHeight; + container.scrollTop = + selectedTop - containerHeight / 2 + selectedHeight / 2; + return true; + } + return false; + }; + + let attempts = 0; + const maxAttempts = 10; + const tryScroll = () => { + if (scrollToSelected() || attempts >= maxAttempts) return; + attempts++; + requestAnimationFrame(tryScroll); + }; + + requestAnimationFrame(tryScroll); + }, [isOpen]); + + const handleCreateNewMode = () => { + dispatch(push({ name: "customization", kind: "modes" })); + setIsOpen(false); + }; + + if (isLoading) { + return ( + <Skeleton> + <div className={styles.trigger}> + <Text size="1">Loading...</Text> + </div> + </Skeleton> + ); + } + + if (isError || modes.length === 0) { + return ( + <div className={`${styles.trigger} ${styles.disabled}`}> + <Text size="1" color="gray"> + {isError ? "Error" : "No modes"} + </Text> + </div> + ); + } + + const triggerContent = ( + <Flex align="center" gap="1" className={styles.triggerContent}> + <Text size="1">{currentTitle}</Text> + {toolsCount > 0 && ( + <> + <Text size="1" color="gray"> + · + </Text> + <Text size="1" color="gray"> + {toolsCount} tools + </Text> + </> + )} + </Flex> + ); + + return ( + <> + <Popover.Root open={isOpen} onOpenChange={setIsOpen}> + <Popover.Trigger> + <button + className={`${styles.trigger} ${ + isModeDisabled ? styles.disabled : "" + }`} + disabled={isModeDisabled} + type="button" + title={ + hasMessages + ? "Click to switch mode (context will be preserved)" + : undefined + } + > + {triggerContent} + </button> + </Popover.Trigger> + + <Popover.Content + className={styles.content} + side="top" + align="start" + sideOffset={8} + > + <div className={styles.modeList} ref={modeListRef}> + {modes.map((mode, index) => { + const isSelected = effectiveMode === mode.id; + return ( + <React.Fragment key={mode.id}> + {index > 0 && ( + <Separator size="4" className={styles.separator} /> + )} + <ModeMenuItem + ref={isSelected ? 
selectedModeRef : undefined} + mode={mode} + isSelected={isSelected} + onSelect={() => handleModeSelect(mode)} + disabled={false} + showTransitionHint={hasMessages} + isSelfSwitch={hasMessages && isSelected} + /> + </React.Fragment> + ); + })} + <Separator size="4" className={styles.separator} /> + <button + className={styles.addModeItem} + onClick={handleCreateNewMode} + type="button" + > + <Text size="1">Create new mode...</Text> + </button> + </div> + </Popover.Content> + </Popover.Root> + + {targetModeForTransition && currentChatId && ( + <ModeTransitionDialog + open={transitionDialogOpen} + onOpenChange={handleTransitionDialogClose} + chatId={currentChatId} + currentMode={effectiveMode} + targetMode={targetModeForTransition.id} + targetModeTitle={targetModeForTransition.title} + targetModeDescription={targetModeForTransition.description} + /> + )} + </> + ); +}; + +type ModeMenuItemProps = { + mode: ChatModeInfo; + isSelected: boolean; + onSelect: () => void; + disabled?: boolean; + showTransitionHint?: boolean; + isSelfSwitch?: boolean; +}; + +const ModeMenuItem = React.forwardRef<HTMLButtonElement, ModeMenuItemProps>( + ( + { mode, isSelected, onSelect, disabled, showTransitionHint, isSelfSwitch }, + ref, + ) => { + return ( + <button + ref={ref} + className={`${styles.item} ${isSelected ? styles.itemSelected : ""} ${ + disabled ? styles.itemDisabled : "" + }`} + onClick={onSelect} + type="button" + disabled={disabled} + > + <Flex direction="column" gap="1" style={{ width: "100%" }}> + <Flex align="center" gap="2"> + <Text size="1" weight="medium"> + {mode.title} + </Text> + {showTransitionHint && ( + <Badge + size="1" + color={isSelfSwitch ? "green" : "amber"} + variant="soft" + > + {isSelfSwitch ? "restart" : "switch"} + </Badge> + )} + </Flex> + + {mode.description && ( + <Text size="1" color="gray" className={styles.description}> + {mode.description.length > 80 + ? mode.description.slice(0, 80) + "..." 
+ : mode.description} + </Text> + )} + + <Flex align="center" gap="1" wrap="wrap"> + {mode.ui.tags.slice(0, 2).map((tag) => ( + <Badge + key={tag} + size="1" + color="gray" + variant="soft" + className={styles.badge} + > + {tag} + </Badge> + ))} + {mode.tools_count > 0 && ( + <Badge + size="1" + color="blue" + variant="soft" + className={styles.badge} + > + {mode.tools_count} tools + </Badge> + )} + </Flex> + </Flex> + </button> + ); + }, +); + +ModeMenuItem.displayName = "ModeMenuItem"; +ModeSelect.displayName = "ModeSelect"; diff --git a/refact-agent/gui/src/components/ChatForm/ModeTransitionDialog.module.css b/refact-agent/gui/src/components/ChatForm/ModeTransitionDialog.module.css new file mode 100644 index 000000000..d1ba01776 --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ModeTransitionDialog.module.css @@ -0,0 +1,11 @@ +.dialogContent { + transform: translateZ(0); +} + +.callout { + margin-top: var(--space-3); +} + +.loadingContainer { + padding: var(--space-6); +} diff --git a/refact-agent/gui/src/components/ChatForm/ModeTransitionDialog.tsx b/refact-agent/gui/src/components/ChatForm/ModeTransitionDialog.tsx new file mode 100644 index 000000000..136844f90 --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ModeTransitionDialog.tsx @@ -0,0 +1,196 @@ +import React, { useCallback, useState } from "react"; +import { + Dialog, + Flex, + Text, + Button, + Callout, + Badge, + Spinner, +} from "@radix-ui/themes"; +import { ExclamationTriangleIcon } from "@radix-ui/react-icons"; +import { useApplyModeTransitionMutation } from "../../services/refact/trajectory"; +import { trajectoriesApi } from "../../services/refact/trajectories"; +import { + createChatWithId, + requestSseRefresh, + closeThread, +} from "../../features/Chat/Thread/actions"; +import { push } from "../../features/Pages/pagesSlice"; +import { useAppDispatch, useAppSelector } from "../../hooks"; +import { selectLspPort, selectApiKey } from "../../features/Config/configSlice"; +import { regenerate } from "../../services/refact/chatCommands"; +import styles from "./ModeTransitionDialog.module.css"; + +function extractErrorMessage(err: unknown): string { + if (err && typeof err === "object") { + const obj = err as Record<string, unknown>; + if (obj.data && typeof obj.data === "object") { + const data = obj.data as Record<string, unknown>; + if (typeof data.detail === "string") return data.detail; + } + if (typeof obj.data === "string") return obj.data; + if (typeof obj.message === "string") return obj.message; + } + if (err instanceof Error) return err.message; + return "Failed to apply transition"; +} + +type ModeTransitionDialogProps = { + open: boolean; + onOpenChange: (open: boolean) => void; + chatId: string; + currentMode: string; + targetMode: string; + targetModeTitle: string; + targetModeDescription: string; +}; + +function isSelfSwitch(currentMode: string, targetMode: string): boolean { + return currentMode === targetMode; +} + +export const ModeTransitionDialog: React.FC<ModeTransitionDialogProps> = ({ + open, + onOpenChange, + chatId, + currentMode, + targetMode, + targetModeTitle, + targetModeDescription, +}) => { + const dispatch = useAppDispatch(); + const port = useAppSelector(selectLspPort); + const apiKey = useAppSelector(selectApiKey); + const [error, setError] = useState<string | null>(null); + + const [applyMutation, { isLoading: isApplying }] = + useApplyModeTransitionMutation(); + + const handleApply = useCallback(async () => { + setError(null); + try { + const result = await 
applyMutation({ + chatId, + targetMode, + targetModeDescription, + }).unwrap(); + + onOpenChange(false); + + await dispatch( + trajectoriesApi.endpoints.listAllTrajectories.initiate(undefined, { + forceRefetch: true, + }), + ).unwrap(); + + dispatch(closeThread({ id: chatId, force: true })); + dispatch( + createChatWithId({ + id: result.new_chat_id, + mode: targetMode, + parentId: chatId, + linkType: "mode_transition", + }), + ); + dispatch(requestSseRefresh({ chatId: result.new_chat_id })); + dispatch(push({ name: "chat" })); + + await regenerate(result.new_chat_id, port, apiKey ?? undefined); + } catch (err) { + const errorMessage = extractErrorMessage(err); + setError(errorMessage); + } + }, [ + chatId, + targetMode, + targetModeDescription, + applyMutation, + dispatch, + onOpenChange, + port, + apiKey, + ]); + + const handleOpenChange = useCallback( + (newOpen: boolean) => { + if (!newOpen) { + setError(null); + } + onOpenChange(newOpen); + }, + [onOpenChange], + ); + + const isSelf = isSelfSwitch(currentMode, targetMode); + + return ( + <Dialog.Root open={open} onOpenChange={handleOpenChange}> + <Dialog.Content maxWidth="500px" className={styles.dialogContent}> + <Dialog.Title> + <Flex align="center" gap="2"> + <Text>{isSelf ? "Restart Mode" : "Switch Mode"}</Text> + {isSelf ? ( + <Badge color="green">{targetModeTitle || targetMode}</Badge> + ) : ( + <> + <Badge color="gray">{currentMode}</Badge> + <Text color="gray">→</Text> + <Badge color="blue">{targetModeTitle || targetMode}</Badge> + </> + )} + </Flex> + </Dialog.Title> + + <Dialog.Description size="2" color="gray"> + {isSelf + ? "The assistant will analyze your conversation and create a fresh start with preserved context." + : "The assistant will analyze your conversation and preserve relevant context for the new mode."} + </Dialog.Description> + + {error && ( + <Callout.Root color="red" className={styles.callout}> + <Callout.Icon> + <ExclamationTriangleIcon /> + </Callout.Icon> + <Callout.Text>{error}</Callout.Text> + </Callout.Root> + )} + + {isApplying && ( + <Flex + align="center" + justify="center" + gap="2" + className={styles.loadingContainer} + > + <Spinner /> + <Text color="gray">Analyzing conversation...</Text> + </Flex> + )} + + <Flex gap="3" mt="4" justify="end"> + <Dialog.Close> + <Button variant="soft" color="gray" disabled={isApplying}> + Cancel + </Button> + </Dialog.Close> + <Button onClick={() => void handleApply()} disabled={isApplying}> + {isApplying ? ( + <> + <Spinner size="1" /> + {isSelf ? "Restarting..." : "Switching..."} + </> + ) : isSelf ? 
( + "Restart Mode" + ) : ( + "Switch Mode" + )} + </Button> + </Flex> + </Dialog.Content> + </Dialog.Root> + ); +}; + +ModeTransitionDialog.displayName = "ModeTransitionDialog"; diff --git a/refact-agent/gui/src/components/ChatForm/ProjectInformationDialog.tsx b/refact-agent/gui/src/components/ChatForm/ProjectInformationDialog.tsx new file mode 100644 index 000000000..d0b075040 --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/ProjectInformationDialog.tsx @@ -0,0 +1,629 @@ +import React, { useCallback, useEffect, useMemo, useState } from "react"; +import { + Dialog, + Flex, + Text, + Button, + Switch, + ScrollArea, + Slider, + Callout, + Separator, + Badge, + IconButton, + Code, +} from "@radix-ui/themes"; +import { + ExclamationTriangleIcon, + CheckCircledIcon, + EyeOpenIcon, + Cross2Icon, +} from "@radix-ui/react-icons"; +import { + useGetProjectInformationQuery, + useSaveProjectInformationMutation, + useGetProjectInformationPreviewMutation, + ProjectInformationConfig, + ProjectInfoBlock, + defaultProjectInformationConfig, + SectionConfig, +} from "../../services/refact/projectInformation"; +import { useAppDispatch, useAppSelector } from "../../hooks"; +import { selectCurrentThreadId } from "../../features/Chat"; +import { setIncludeProjectInfo } from "../../features/Chat/Thread/actions"; + +type Props = { + open: boolean; + onOpenChange: (open: boolean) => void; +}; + +type SectionMeta = { + label: string; + field: "max_chars" | "max_chars_per_item" | "max_items"; + minTokens: number; + maxTokens: number; + stepTokens: number; +}; + +const SECTION_META: Record<string, SectionMeta> = { + system_info: { + label: "System Information", + field: "max_chars", + minTokens: 100, + maxTokens: 2000, + stepTokens: 100, + }, + environment_instructions: { + label: "Environment Instructions", + field: "max_chars", + minTokens: 250, + maxTokens: 4000, + stepTokens: 250, + }, + detected_environments: { + label: "Detected Environments", + field: "max_items", + minTokens: 5, + maxTokens: 100, + stepTokens: 5, + }, + git_info: { + label: "Git Information", + field: "max_chars", + minTokens: 250, + maxTokens: 4000, + stepTokens: 250, + }, + project_tree: { + label: "Project Tree", + field: "max_chars", + minTokens: 500, + maxTokens: 16000, + stepTokens: 500, + }, + instruction_files: { + label: "Instruction Files (AGENTS.md, etc.)", + field: "max_chars_per_item", + minTokens: 250, + maxTokens: 16000, + stepTokens: 500, + }, + project_configs: { + label: "Project Configs (.refact/)", + field: "max_chars_per_item", + minTokens: 250, + maxTokens: 8000, + stepTokens: 250, + }, + memories: { + label: "Memories", + field: "max_chars_per_item", + minTokens: 100, + maxTokens: 8000, + stepTokens: 250, + }, +}; + +const truncatePath = (path: string, maxLen = 50): string => { + if (path.length <= maxLen) return path; + const parts = path.split("/"); + if (parts.length <= 2) return "..." + path.slice(-maxLen + 3); + const filename = parts[parts.length - 1]; + const parent = parts[parts.length - 2]; + const suffix = `${parent}/${filename}`; + if (suffix.length >= maxLen - 3) return "..." 
+ suffix.slice(-maxLen + 3); + return ".../" + suffix; +}; + +const CHARS_PER_TOKEN = 4; +const charsToTokens = (chars: number): number => + Math.ceil(chars / CHARS_PER_TOKEN); +const tokensToChars = (tokens: number): number => tokens * CHARS_PER_TOKEN; + +type ContentPreviewProps = { + block: ProjectInfoBlock | null; + onClose: () => void; +}; + +const ContentPreviewDialog: React.FC<ContentPreviewProps> = ({ + block, + onClose, +}) => { + if (!block) return null; + + const isTruncated = block.truncated && block.original_char_count; + const originalTokens = + isTruncated && block.original_char_count + ? charsToTokens(block.original_char_count) + : charsToTokens(block.char_count); + const truncatedTokens = charsToTokens(block.char_count); + + return ( + <Dialog.Root open={!!block} onOpenChange={(open) => !open && onClose()}> + <Dialog.Content + maxWidth="800px" + style={{ maxHeight: "80vh", overflow: "hidden" }} + > + <Flex justify="between" align="center" mb="3"> + <Dialog.Title style={{ margin: 0 }}> + {block.path ?? block.title} + </Dialog.Title> + <IconButton variant="ghost" onClick={onClose}> + <Cross2Icon /> + </IconButton> + </Flex> + + <Flex gap="2" mb="3" wrap="wrap"> + <Badge color="blue"> + {isTruncated + ? `${originalTokens.toLocaleString()} → ${truncatedTokens.toLocaleString()} tokens` + : `~${truncatedTokens.toLocaleString()} tokens`} + </Badge> + {isTruncated && <Badge color="orange">Truncated</Badge>} + <Badge color="gray">{block.section}</Badge> + </Flex> + + <ScrollArea style={{ maxHeight: "calc(80vh - 150px)" }}> + <Code + size="1" + style={{ + display: "block", + whiteSpace: "pre-wrap", + wordBreak: "break-word", + padding: "var(--space-3)", + backgroundColor: "var(--gray-2)", + borderRadius: "var(--radius-2)", + }} + > + {block.content || "(empty)"} + </Code> + </ScrollArea> + + <Flex justify="end" mt="3"> + <Button type="button" variant="soft" onClick={onClose}> + Close + </Button> + </Flex> + </Dialog.Content> + </Dialog.Root> + ); +}; + +type SectionRowProps = { + sectionKey: string; + config: SectionConfig; + blocks: ProjectInfoBlock[]; + onToggle: (enabled: boolean) => void; + onFieldChange: (field: string, value: number) => void; + onFileToggle?: (path: string, enabled: boolean) => void; + onPreviewBlock?: (block: ProjectInfoBlock) => void; +}; + +const SECTIONS_WITH_FILE_TOGGLES = ["instruction_files", "memories"]; + +const SectionRow: React.FC<SectionRowProps> = ({ + sectionKey, + config, + blocks, + onToggle, + onFieldChange, + onFileToggle, + onPreviewBlock, +}) => { + const meta = SECTION_META[sectionKey]; + const allSectionBlocks = blocks.filter((b) => b.section === sectionKey); + const enabledBlocks = allSectionBlocks.filter((b) => b.enabled); + const totalChars = enabledBlocks.reduce((sum, b) => sum + b.char_count, 0); + const tokens = charsToTokens(totalChars); + + const isItemsField = meta.field === "max_items"; + const currentChars = config[meta.field] ?? tokensToChars(meta.maxTokens / 2); + const currentTokens = isItemsField + ? currentChars + : charsToTokens(currentChars); + const fieldLabel = isItemsField ? "Max items" : "Max tokens"; + const showFileToggles = + SECTIONS_WITH_FILE_TOGGLES.includes(sectionKey) && + allSectionBlocks.length > 0 && + allSectionBlocks[0].path; + + const handleSliderChange = (tokenValue: number) => { + const charValue = isItemsField ? 
tokenValue : tokensToChars(tokenValue); + onFieldChange(meta.field, charValue); + }; + + return ( + <Flex direction="column" gap="2" py="2"> + <Flex align="center" justify="between"> + <Flex align="center" gap="2"> + <Switch + size="1" + checked={config.enabled} + onCheckedChange={onToggle} + /> + <Text size="2" weight="medium"> + {meta.label} + </Text> + </Flex> + <Badge color={config.enabled ? "blue" : "gray"} size="1"> + ~{tokens.toLocaleString()} tokens + </Badge> + </Flex> + {config.enabled && ( + <Flex direction="column" gap="1" pl="6"> + <Flex align="center" gap="2"> + <Text size="1" color="gray"> + {fieldLabel}: + </Text> + <Slider + size="1" + value={[currentTokens]} + min={meta.minTokens} + max={meta.maxTokens} + step={meta.stepTokens} + onValueChange={([v]) => handleSliderChange(v)} + style={{ width: 120 }} + /> + <Text size="1" color="gray"> + {currentTokens.toLocaleString()} + </Text> + </Flex> + {allSectionBlocks.length > 0 && ( + <Flex align="center" gap="2"> + <Text size="1" color="gray"> + {enabledBlocks.length}/{allSectionBlocks.length} item(s), ~ + {tokens.toLocaleString()} tokens + </Text> + {!showFileToggles && + allSectionBlocks.length === 1 && + onPreviewBlock && ( + <IconButton + size="1" + variant="ghost" + onClick={() => onPreviewBlock(allSectionBlocks[0])} + title="View content" + > + <EyeOpenIcon /> + </IconButton> + )} + </Flex> + )} + {showFileToggles && onFileToggle && ( + <Flex + direction="column" + gap="1" + mt="2" + style={{ maxWidth: "100%", overflow: "hidden" }} + > + {allSectionBlocks.map((block) => ( + <Flex + key={block.id} + align="center" + gap="2" + style={{ + opacity: block.enabled ? 1 : 0.6, + minWidth: 0, + }} + > + <Switch + size="1" + checked={block.enabled} + onCheckedChange={(checked) => { + if (block.path) { + onFileToggle(block.path, checked); + } + }} + style={{ flexShrink: 0 }} + /> + <Text + size="1" + style={{ + flex: 1, + minWidth: 0, + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + }} + title={block.path ?? block.title} + > + {truncatePath(block.path ?? block.title, 45)} + </Text> + <Text + size="1" + color="gray" + style={{ flexShrink: 0, whiteSpace: "nowrap" }} + > + {block.original_char_count + ? 
`${charsToTokens( + block.original_char_count, + ).toLocaleString()}→${charsToTokens( + block.char_count, + ).toLocaleString()}` + : `~${charsToTokens( + block.char_count, + ).toLocaleString()}`}{" "} + tok + </Text> + {onPreviewBlock && ( + <IconButton + size="1" + variant="ghost" + onClick={() => onPreviewBlock(block)} + title="View content" + style={{ flexShrink: 0 }} + > + <EyeOpenIcon /> + </IconButton> + )} + </Flex> + ))} + </Flex> + )} + </Flex> + )} + </Flex> + ); +}; + +export const ProjectInformationDialog: React.FC<Props> = ({ + open, + onOpenChange, +}) => { + const dispatch = useAppDispatch(); + const chatId = useAppSelector(selectCurrentThreadId); + const { data: savedConfig, isLoading } = useGetProjectInformationQuery( + undefined, + { + skip: !open, + }, + ); + const [saveConfig, { isLoading: isSaving }] = + useSaveProjectInformationMutation(); + const [triggerPreview, { data: previewData, isLoading: isPreviewing }] = + useGetProjectInformationPreviewMutation(); + + const [localConfig, setLocalConfig] = useState<ProjectInformationConfig>( + defaultProjectInformationConfig, + ); + const [saveError, setSaveError] = useState<string | null>(null); + const [saveSuccess, setSaveSuccess] = useState(false); + const [previewBlock, setPreviewBlock] = useState<ProjectInfoBlock | null>( + null, + ); + + useEffect(() => { + if (savedConfig) { + setLocalConfig(savedConfig); + } + }, [savedConfig]); + + useEffect(() => { + if (!open) { + setSaveError(null); + setSaveSuccess(false); + } + }, [open]); + + useEffect(() => { + if (open && localConfig.enabled) { + const timeoutId = setTimeout(() => { + void triggerPreview(localConfig); + }, 300); + return () => clearTimeout(timeoutId); + } + }, [open, localConfig, triggerPreview]); + + const blocks = useMemo( + () => previewData?.blocks ?? [], + [previewData?.blocks], + ); + + const totalTokens = useMemo(() => { + if (!localConfig.enabled) return 0; + const enabledBlocks = blocks.filter((b) => b.enabled); + const totalChars = enabledBlocks.reduce((sum, b) => sum + b.char_count, 0); + return charsToTokens(totalChars); + }, [blocks, localConfig.enabled]); + + const updateSection = useCallback( + ( + sectionKey: keyof ProjectInformationConfig["sections"], + updates: Partial<SectionConfig>, + ) => { + setLocalConfig((prev) => ({ + ...prev, + sections: { + ...prev.sections, + [sectionKey]: { + ...prev.sections[sectionKey], + ...updates, + }, + }, + })); + }, + [], + ); + + const updateFileOverride = useCallback( + ( + sectionKey: keyof ProjectInformationConfig["sections"], + path: string, + enabled: boolean, + ) => { + setLocalConfig((prev) => { + const section = prev.sections[sectionKey]; + const currentOverrides = section.overrides ?? {}; + const currentOverride = + (currentOverrides[path] as Record<string, unknown> | undefined) ?? {}; + return { + ...prev, + sections: { + ...prev.sections, + [sectionKey]: { + ...section, + overrides: { + ...currentOverrides, + [path]: { + ...currentOverride, + enabled, + }, + }, + }, + }, + }; + }); + }, + [], + ); + + const handleSave = useCallback(async () => { + setSaveError(null); + setSaveSuccess(false); + try { + await saveConfig(localConfig).unwrap(); + setSaveSuccess(true); + setTimeout(() => onOpenChange(false), 500); + } catch (err) { + setSaveError( + err instanceof Error ? 
err.message : "Failed to save configuration", + ); + } + }, [saveConfig, localConfig, onOpenChange]); + + const handleReset = useCallback(() => { + setLocalConfig(defaultProjectInformationConfig); + }, []); + + if (isLoading) { + return ( + <Dialog.Root open={open} onOpenChange={onOpenChange}> + <Dialog.Content maxWidth="600px"> + <Dialog.Title>Project Information</Dialog.Title> + <Flex align="center" justify="center" py="6"> + <Text color="gray">Loading...</Text> + </Flex> + </Dialog.Content> + </Dialog.Root> + ); + } + + return ( + <Dialog.Root open={open} onOpenChange={onOpenChange}> + <Dialog.Content maxWidth="600px" style={{ overflow: "hidden" }}> + <Dialog.Title>Project Information</Dialog.Title> + <Dialog.Description size="2" color="gray" mb="4"> + Configure what project information is included in chat context. Token + counts are approximate (~4 chars/token). + </Dialog.Description> + + {saveError && ( + <Callout.Root color="red" mb="3"> + <Callout.Icon> + <ExclamationTriangleIcon /> + </Callout.Icon> + <Callout.Text>{saveError}</Callout.Text> + </Callout.Root> + )} + + {saveSuccess && ( + <Callout.Root color="green" mb="3"> + <Callout.Icon> + <CheckCircledIcon /> + </Callout.Icon> + <Callout.Text>Configuration saved!</Callout.Text> + </Callout.Root> + )} + + <Flex align="center" justify="between" mb="3"> + <Flex align="center" gap="2"> + <Switch + checked={localConfig.enabled} + onCheckedChange={(enabled) => { + setLocalConfig((prev) => ({ ...prev, enabled })); + if (chatId) { + dispatch(setIncludeProjectInfo({ chatId, value: enabled })); + } + }} + /> + <Text weight="medium">Include project information</Text> + </Flex> + <Badge color="blue" size="2"> + Total: ~{totalTokens.toLocaleString()} tokens + {isPreviewing && " (updating...)"} + </Badge> + </Flex> + + <Separator size="4" mb="3" /> + + <ScrollArea style={{ maxHeight: 400 }}> + <Flex direction="column" gap="1"> + {Object.keys(SECTION_META).map((sectionKey) => { + const key = + sectionKey as keyof ProjectInformationConfig["sections"]; + return ( + <React.Fragment key={sectionKey}> + <SectionRow + sectionKey={sectionKey} + config={localConfig.sections[key]} + blocks={blocks} + onToggle={(enabled) => updateSection(key, { enabled })} + onFieldChange={(field, value) => + updateSection(key, { [field]: value }) + } + onFileToggle={(path, enabled) => + updateFileOverride(key, path, enabled) + } + onPreviewBlock={setPreviewBlock} + /> + <Separator size="4" /> + </React.Fragment> + ); + })} + </Flex> + </ScrollArea> + + {previewData?.warnings && previewData.warnings.length > 0 && ( + <Callout.Root color="orange" mt="3"> + <Callout.Icon> + <ExclamationTriangleIcon /> + </Callout.Icon> + <Callout.Text> + {previewData.warnings.length} warning(s):{" "} + {previewData.warnings[0]} + {previewData.warnings.length > 1 && + ` (+${previewData.warnings.length - 1} more)`} + </Callout.Text> + </Callout.Root> + )} + + <Flex gap="3" mt="4" justify="end"> + <Button + type="button" + variant="soft" + color="gray" + onClick={handleReset} + > + Reset to Defaults + </Button> + <Dialog.Close> + <Button type="button" variant="soft" color="gray"> + Cancel + </Button> + </Dialog.Close> + <Button + type="button" + onClick={() => void handleSave()} + disabled={isSaving} + > + {isSaving ? "Saving..." 
: "Save"} + </Button> + </Flex> + + <ContentPreviewDialog + block={previewBlock} + onClose={() => setPreviewBlock(null)} + /> + </Dialog.Content> + </Dialog.Root> + ); +}; diff --git a/refact-agent/gui/src/components/ChatForm/PromptSelect.tsx b/refact-agent/gui/src/components/ChatForm/PromptSelect.tsx deleted file mode 100644 index f4d21af6e..000000000 --- a/refact-agent/gui/src/components/ChatForm/PromptSelect.tsx +++ /dev/null @@ -1,93 +0,0 @@ -import React, { useCallback, useMemo } from "react"; -import { Flex, Skeleton, Text, Box } from "@radix-ui/themes"; -import { Select } from "../Select"; -import type { SystemPrompts } from "../../services/refact"; -import { - useAppDispatch, - useAppSelector, - useGetPromptsQuery, - useGetCapsQuery, -} from "../../hooks"; -import { getSelectedSystemPrompt } from "../../features/Chat/Thread/selectors"; -import { setSystemPrompt } from "../../features/Chat/Thread/actions"; - -export const PromptSelect: React.FC = () => { - const dispatch = useAppDispatch(); - const promptsRequest = useGetPromptsQuery(); - const selectedSystemPrompt = useAppSelector(getSelectedSystemPrompt); - const onSetSelectedSystemPrompt = useCallback( - (prompt: SystemPrompts) => dispatch(setSystemPrompt(prompt)), - [dispatch], - ); - - const handleChange = useCallback( - (key: string) => { - if (!promptsRequest.data) return; - if (!(key in promptsRequest.data)) return; - const promptValue = promptsRequest.data[key]; - const prompt = { [key]: promptValue }; - onSetSelectedSystemPrompt(prompt); - }, - [onSetSelectedSystemPrompt, promptsRequest.data], - ); - - const caps = useGetCapsQuery(); - - const default_system_prompt = useMemo(() => { - if ( - caps.data?.code_chat_default_system_prompt && - caps.data.code_chat_default_system_prompt !== "" - ) { - return caps.data.code_chat_default_system_prompt; - } - return "default"; - }, [caps.data?.code_chat_default_system_prompt]); - - const val = useMemo( - () => Object.keys(selectedSystemPrompt)[0] ?? default_system_prompt, - [selectedSystemPrompt, default_system_prompt], - ); - - const options = useMemo(() => { - return Object.entries(promptsRequest.data ?? 
{}).map(([key, value]) => { - return { - value: key, - title: value.description || value.text, - }; - }); - }, [promptsRequest.data]); - - const isLoading = useMemo( - () => - promptsRequest.isLoading || promptsRequest.isFetching || caps.isLoading, - [promptsRequest.isLoading, promptsRequest.isFetching, caps.isLoading], - ); - - if (options.length <= 1) return null; - - return ( - <Flex - gap="2" - align="center" - wrap="wrap" - flexGrow="1" - flexShrink="0" - width="100%" - > - <Text size="2" wrap="nowrap"> - System Prompt: - </Text> - <Skeleton loading={isLoading}> - <Box flexGrow="1" flexShrink="0"> - <Select - name="system prompt" - disabled={promptsRequest.isLoading} - onChange={handleChange} - value={val} - options={options} - /> - </Box> - </Skeleton> - </Flex> - ); -}; diff --git a/refact-agent/gui/src/components/ChatForm/RetryForm.tsx b/refact-agent/gui/src/components/ChatForm/RetryForm.tsx index 70aa5b6a3..d1670560d 100644 --- a/refact-agent/gui/src/components/ChatForm/RetryForm.tsx +++ b/refact-agent/gui/src/components/ChatForm/RetryForm.tsx @@ -1,19 +1,42 @@ -import React, { useCallback, useMemo, useState } from "react"; -import { Avatar, Button, Flex, Box } from "@radix-ui/themes"; +import React, { + useCallback, + useEffect, + useMemo, + useRef, + useState, +} from "react"; +import { + Button, + Flex, + Box, + IconButton, + Popover, + Text, + Separator, + Badge, +} from "@radix-ui/themes"; import { FileRejection, useDropzone } from "react-dropzone"; import { TextArea } from "../TextArea"; -import { useOnPressedEnter } from "../../hooks/useOnPressedEnter"; -import { Form } from "./Form"; import { useAppSelector, useCapsForToolUse } from "../../hooks"; -import { selectSubmitOption } from "../../features/Config/configSlice"; + import { ProcessedUserMessageContentWithImages, UserImage, UserMessage, } from "../../services/refact"; -import { ImageIcon, CrossCircledIcon } from "@radix-ui/react-icons"; +import { + Cross2Icon, + CheckIcon, + PlusIcon, + ChevronDownIcon, +} from "@radix-ui/react-icons"; import { useAttachedImages } from "../../hooks/useAttachedImages"; import { selectIsStreaming, selectIsWaiting } from "../../features/Chat"; +import { enrichAndGroupModels } from "../../utils/enrichModels"; +import styles from "./ChatForm.module.css"; +import dropdownStyles from "./ChatSettingsDropdown.module.css"; +import classNames from "classnames"; +import { DialogImage } from "../DialogImage"; function getTextFromUserMessage(messages: UserMessage["content"]): string { if (typeof messages === "string") return messages; @@ -52,12 +75,10 @@ function getImageContent( } export const RetryForm: React.FC<{ - // value: string; value: UserMessage["content"]; onSubmit: (value: UserMessage["content"]) => void; onClose: () => void; }> = (props) => { - const shiftEnterToSubmit = useAppSelector(selectSubmitOption); const { isMultimodalitySupportedForCurrentModel } = useCapsForToolUse(); const inputText = getTextFromUserMessage(props.value); const inputImages = getImageFromUserMessage(props.value); @@ -65,6 +86,7 @@ export const RetryForm: React.FC<{ const [imageValue, onChangeImageValue] = useState(inputImages); const isStreaming = useAppSelector(selectIsStreaming); const isWaiting = useAppSelector(selectIsWaiting); + const formRef = useRef<HTMLDivElement>(null); const disableInput = useMemo( () => isStreaming || isWaiting, @@ -77,36 +99,76 @@ export const RetryForm: React.FC<{ }); }, []); - const closeAndReset = () => { + const closeAndReset = useCallback(() => { 
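+    // Restore the original text and images before closing; memoized so the
+    // click-outside effect below keeps a stable reference.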
onChangeImageValue(inputImages); onChangeTextValue(inputText); props.onClose(); - }; + }, [inputImages, inputText, props]); + + // Click outside to cancel edit + useEffect(() => { + const handleClickOutside = (event: MouseEvent) => { + if (formRef.current && !formRef.current.contains(event.target as Node)) { + closeAndReset(); + } + }; + + // Use mousedown to catch the click before focus changes + document.addEventListener("mousedown", handleClickOutside); + return () => { + document.removeEventListener("mousedown", handleClickOutside); + }; + }, [closeAndReset]); - const handleRetry = () => { + const handleRetry = useCallback(() => { const trimmedText = textValue.trim(); if (imageValue.length === 0 && trimmedText.length > 0) { props.onSubmit(trimmedText); - } else if (trimmedText.length > 0) { - const text = { - type: "text" as const, - text: textValue.trim(), - }; - props.onSubmit([text, ...imageValue]); + } else if (trimmedText.length > 0 || imageValue.length > 0) { + const content: ( + | { type: "text"; text: string } + | UserImage + | ProcessedUserMessageContentWithImages + )[] = []; + if (trimmedText.length > 0) { + content.push({ type: "text" as const, text: trimmedText }); + } + content.push(...imageValue); + props.onSubmit( + content.length === 1 && trimmedText ? trimmedText : content, + ); } - }; - - const onPressedEnter = useOnPressedEnter(handleRetry); + }, [textValue, imageValue, props]); const handleOnKeyDown = useCallback( (event: React.KeyboardEvent<HTMLTextAreaElement>) => { - if (shiftEnterToSubmit && !event.shiftKey && event.key === "Enter") { - onChangeTextValue(textValue + "\n"); + // Don't handle during IME composition + if (event.nativeEvent.isComposing) { + return; + } + + // Escape: cancel and close + if (event.key === "Escape") { + event.preventDefault(); + closeAndReset(); + return; + } + + // Enter without Shift: submit + if (event.key === "Enter" && !event.shiftKey) { + event.preventDefault(); + if ( + !disableInput && + (textValue.trim().length > 0 || imageValue.length > 0) + ) { + handleRetry(); + } return; } - onPressedEnter(event); + + // Shift+Enter: allow newline (default behavior, no preventDefault) }, - [onPressedEnter, shiftEnterToSubmit, textValue], + [closeAndReset, disableInput, textValue, imageValue, handleRetry], ); const handleRemove = useCallback((index: number) => { @@ -116,80 +178,87 @@ export const RetryForm: React.FC<{ }, []); return ( - <Form - onSubmit={(event) => { - event.preventDefault(); - handleRetry(); - }} + <Box + ref={formRef} + className={classNames(styles.chatForm, styles.chatFormCompact)} > - <TextArea - value={textValue} - onChange={(event) => onChangeTextValue(event.target.value)} - onKeyDown={handleOnKeyDown} - /> - - {imageValue.length > 0 && ( - <Flex - px="2" - py="4" - wrap="wrap" - direction="row" - align="center" - justify="center" - style={{ - backgroundColor: "var(--color-surface)", - }} - > - {imageValue.map((image, index) => { - return ( - <MyImage - key={`retry-user-image-${index}`} - image={getImageContent(image)} - onRemove={() => handleRemove(index)} - /> - ); - })} - </Flex> - )} - - <Flex - align="center" - justify="center" - gap="1" - direction="row" - p="2" - wrap="wrap" - style={{ - backgroundColor: "var(--color-surface)", + <form + onSubmit={(event) => { + event.preventDefault(); + handleRetry(); }} > - <Button - color="grass" - variant="surface" - size="1" - type="submit" - disabled={disableInput} - > - Submit - </Button> - <Button - variant="surface" - color="tomato" - size="1" - onClick={closeAndReset} 
- > - Cancel - </Button> - - {isMultimodalitySupportedForCurrentModel && ( - <MyDropzone addImage={addImage} /> + {/* Attachments at top */} + {imageValue.length > 0 && ( + <Flex + px="3" + py="2" + wrap="wrap" + direction="row" + align="center" + gap="2" + > + {imageValue.map((image, index) => { + return ( + <RetryImage + key={`retry-user-image-${index}`} + image={getImageContent(image)} + onRemove={() => handleRemove(index)} + /> + ); + })} + </Flex> )} - </Flex> - </Form> + + {/* TextArea */} + <Box className={styles.textareaWrapper}> + <TextArea + value={textValue} + onChange={(event) => onChangeTextValue(event.target.value)} + onKeyDown={handleOnKeyDown} + autoFocus + style={{ boxShadow: "none", outline: "none" }} + /> + </Box> + + {/* Bottom controls */} + <Flex align="center" gap="2" py="2" px="3"> + <Button + variant="ghost" + color="gray" + size="1" + type="button" + onClick={closeAndReset} + > + <Cross2Icon width={14} height={14} /> + Cancel + </Button> + + <Box flexGrow="1" /> + + <RetryModelSelector disabled={disableInput} /> + {isMultimodalitySupportedForCurrentModel && ( + <RetryDropzone addImage={addImage} /> + )} + <Button + variant="solid" + size="1" + type="submit" + disabled={ + disableInput || + (textValue.trim().length === 0 && imageValue.length === 0) + } + > + <CheckIcon width={14} height={14} /> + Submit + </Button> + </Flex> + </form> + </Box> ); }; -const MyDropzone: React.FC<{ +const RetryDropzone: React.FC<{ addImage: (image: UserImage) => void; }> = ({ addImage }) => { const { setError, setWarning } = useAttachedImages(); @@ -237,49 +306,208 @@ const MyDropzone: React.FC<{ }); return ( - <div {...getRootProps()}> + <div {...getRootProps()} style={{ display: "flex", alignItems: "center" }}> <input {...getInputProps()} style={{ display: "none" }} /> <Button size="1" - variant="surface" + variant="ghost" color="gray" + type="button" onClick={(event) => { event.preventDefault(); event.stopPropagation(); open(); }} > - Add images + <PlusIcon width={14} height={14} /> + Add image </Button> </div> ); }; -const MyImage: React.FC<{ image: string; onRemove: () => void }> = ({ +const RetryImage: React.FC<{ image: string; onRemove: () => void }> = ({ image, onRemove, }) => { return ( - <Box position="relative"> - <Button - variant="ghost" + <Box position="relative" style={{ display: "inline-block" }}> + <DialogImage src={image} size="5" /> + <IconButton + variant="solid" + color="gray" + size="1" + type="button" onClick={(event) => { event.preventDefault(); event.stopPropagation(); onRemove(); }} + style={{ + position: "absolute", + right: -6, + top: -6, + width: 18, + height: 18, + padding: 0, + borderRadius: "50%", + }} > - <CrossCircledIcon - width="16" - color="gray" - style={{ - position: "absolute", - right: "calc(var(--space-2) * -1)", - top: "calc(var(--space-2) * -1)", - }} - /> - <Avatar src={image} size="4" fallback={<ImageIcon />} /> - </Button> + <Cross2Icon width={10} height={10} /> + </IconButton> </Box> ); }; + +const RetryModelSelector: React.FC<{ disabled?: boolean }> = ({ disabled }) => { + const caps = useCapsForToolUse(); + const [isOpen, setIsOpen] = useState(false); + const selectedModelRef = useRef<HTMLButtonElement>(null); + const modelListRef = useRef<HTMLDivElement>(null); + + const currentModelName = caps.currentModel || "Select model"; + + const groupedModels = useMemo(() => { + return enrichAndGroupModels(caps.usableModelsForPlan, caps.data); + }, [caps.usableModelsForPlan, caps.data]); + + useEffect(() => { + if (!isOpen) return; + + const 
scrollToSelected = () => { + const container = modelListRef.current; + const selected = selectedModelRef.current; + if (container && selected && container.clientHeight > 0) { + const containerHeight = container.clientHeight; + const selectedTop = selected.offsetTop; + const selectedHeight = selected.offsetHeight; + container.scrollTop = + selectedTop - containerHeight / 2 + selectedHeight / 2; + return true; + } + return false; + }; + + let attempts = 0; + const maxAttempts = 10; + const tryScroll = () => { + if (scrollToSelected() || attempts >= maxAttempts) return; + attempts++; + requestAnimationFrame(tryScroll); + }; + + requestAnimationFrame(tryScroll); + }, [isOpen]); + + const handleModelSelect = useCallback( + (modelValue: string) => { + caps.setCapModel(modelValue); + setIsOpen(false); + }, + [caps], + ); + + if (caps.loading) { + return null; + } + + return ( + <Popover.Root open={isOpen} onOpenChange={setIsOpen}> + <Popover.Trigger> + <button + className={classNames(dropdownStyles.trigger, { + [dropdownStyles.disabled]: disabled, + })} + disabled={disabled} + type="button" + > + <Flex + align="center" + gap="1" + className={dropdownStyles.triggerContent} + > + <Text size="1" className={dropdownStyles.modelName}> + {currentModelName} + </Text> + <ChevronDownIcon className={dropdownStyles.chevron} /> + </Flex> + </button> + </Popover.Trigger> + + <Popover.Content + className={dropdownStyles.content} + side="top" + align="end" + sideOffset={8} + > + <div className={dropdownStyles.section}> + <div className={dropdownStyles.modelList} ref={modelListRef}> + {groupedModels.map((group, groupIndex) => ( + <React.Fragment key={group.provider}> + {groupIndex > 0 && ( + <Separator + size="4" + className={dropdownStyles.groupSeparator} + /> + )} + <Text + size="1" + color="gray" + className={dropdownStyles.groupHeader} + > + {group.displayName} + </Text> + {group.models.map((model) => { + const isSelected = caps.currentModel === model.value; + return ( + <button + key={model.value} + ref={isSelected ? selectedModelRef : undefined} + className={classNames(dropdownStyles.item, { + [dropdownStyles.itemSelected]: isSelected, + [dropdownStyles.itemDisabled]: model.disabled, + })} + onClick={() => handleModelSelect(model.value)} + disabled={disabled ?? 
model.disabled} + type="button" + > + <Flex align="center" gap="1"> + <Text + size="1" + weight="medium" + className={dropdownStyles.itemModelName} + > + {model.value} + </Text> + {model.isDefault && ( + <Badge + size="1" + color="blue" + variant="soft" + className={dropdownStyles.badge} + > + Default + </Badge> + )} + {model.isThinking && ( + <Badge + size="1" + color="purple" + variant="soft" + className={dropdownStyles.badge} + > + Reasoning + </Badge> + )} + </Flex> + </button> + ); + })} + </React.Fragment> + ))} + </div> + </div> + </Popover.Content> + </Popover.Root> + ); +}; diff --git a/refact-agent/gui/src/components/ChatForm/SuggestNewChat/SuggestNewChat.module.css b/refact-agent/gui/src/components/ChatForm/SuggestNewChat/SuggestNewChat.module.css deleted file mode 100644 index c56c1f30c..000000000 --- a/refact-agent/gui/src/components/ChatForm/SuggestNewChat/SuggestNewChat.module.css +++ /dev/null @@ -1,14 +0,0 @@ -.container { - background-color: var(--violet-a2); - border-radius: var(--radius-2); - border: 1px solid var(--violet-a5); - overflow: hidden; - transition: all 0.3s ease-in-out; - transform: translateY(100%); - opacity: 0; -} - -.visible { - transform: translateY(0%); - opacity: 1; -} diff --git a/refact-agent/gui/src/components/ChatForm/SuggestNewChat/SuggestNewChat.tsx b/refact-agent/gui/src/components/ChatForm/SuggestNewChat/SuggestNewChat.tsx deleted file mode 100644 index 350d87108..000000000 --- a/refact-agent/gui/src/components/ChatForm/SuggestNewChat/SuggestNewChat.tsx +++ /dev/null @@ -1,162 +0,0 @@ -import { Box, Flex, IconButton, Text } from "@radix-ui/themes"; -import { ArchiveIcon, Cross2Icon } from "@radix-ui/react-icons"; -import { useCallback, useEffect, useMemo, useState } from "react"; -import classNames from "classnames"; - -import { clearPauseReasonsAndHandleToolsStatus } from "../../../features/ToolConfirmation/confirmationSlice"; -import { - useAppDispatch, - useAppSelector, - useCompressChat, - useLastSentCompressionStop, -} from "../../../hooks"; -import { popBackTo, push } from "../../../features/Pages/pagesSlice"; -import { telemetryApi } from "../../../services/refact"; -import { - enableSend, - newChatAction, - selectChatId, - setIsNewChatSuggestionRejected, -} from "../../../features/Chat"; - -import { Link } from "../../Link"; - -import styles from "./SuggestNewChat.module.css"; -import { useUsageCounter } from "../../UsageCounter/useUsageCounter"; - -type SuggestNewChatProps = { - shouldBeVisible?: boolean; -}; - -export const SuggestNewChat = ({ - shouldBeVisible = false, -}: SuggestNewChatProps) => { - const dispatch = useAppDispatch(); - const chatId = useAppSelector(selectChatId); - const [sendTelemetryEvent] = - telemetryApi.useLazySendTelemetryChatEventQuery(); - - const { isWarning, isOverflown: isContextOverflown } = useUsageCounter(); - - const [isRendered, setIsRendered] = useState(shouldBeVisible); - const [isAnimating, setIsAnimating] = useState(false); - const { compressChat, isCompressing } = useCompressChat(); - const lastSentCompression = useLastSentCompressionStop(); - - useEffect(() => { - if (shouldBeVisible) { - setIsRendered(true); - // small delay to ensure the initial state is rendered before animation - requestAnimationFrame(() => { - requestAnimationFrame(() => { - setIsAnimating(true); - }); - }); - } else { - setIsAnimating(false); - const timer = setTimeout(() => { - setIsRendered(false); - }, 300); - return () => { - clearTimeout(timer); - }; - } - }, [shouldBeVisible]); - - const handleClose = () => { - 
dispatch(setIsNewChatSuggestionRejected({ chatId, value: true })); - dispatch(enableSend({ id: chatId })); - - void sendTelemetryEvent({ - scope: `dismissedNewChatSuggestionWarning`, - success: true, - error_message: "", - }); - }; - - const onCreateNewChat = useCallback(() => { - const actions = [ - newChatAction(), - clearPauseReasonsAndHandleToolsStatus({ - wasInteracted: false, - confirmationStatus: true, - }), - popBackTo({ name: "history" }), - push({ name: "chat" }), - ]; - - actions.forEach((action) => dispatch(action)); - void sendTelemetryEvent({ - scope: `openNewChat`, - success: true, - error_message: "", - }); - }, [dispatch, sendTelemetryEvent]); - - const tipText = useMemo(() => { - if (isWarning) - return "This chat has been moderately compressed. The model may have limited access to earlier messages."; - if (isContextOverflown) - return "This chat has been heavily compressed. The model might not recall details from earlier conversations."; - return "For best results, consider starting a new chat when switching topics."; - }, [isWarning, isContextOverflown]); - - if (isCompressing) return null; - - return ( - <Box - py="3" - px="4" - mb="1" - flexShrink="0" - display={isRendered ? "block" : "none"} - className={classNames(styles.container, { - [styles.visible]: isAnimating, - })} - > - <Flex align="center" justify="between" gap="2" wrap="wrap"> - <Text size="1"> - <Text weight="bold">Tip:</Text> {tipText} - </Text> - - <Flex align="center" mr="2" wrap="wrap" gap="2"> - <Link size="1" onClick={onCreateNewChat} color="indigo"> - Start a new chat - </Link> - {lastSentCompression.strength && - lastSentCompression.strength !== "absent" && ( - <Link - size="1" - onClick={() => { - void compressChat(); - }} - color="indigo" - asChild - > - <Flex - align="center" - justify="start" - gap="1" - display="inline-flex" - > - <ArchiveIcon style={{ alignSelf: "start" }} /> - Summarize and continue in a new chat. 
- </Flex> - </Link> - )} - </Flex> - <Box position="absolute" top="1" right="1"> - <IconButton - asChild - variant="ghost" - color="violet" - size="1" - onClick={handleClose} - > - <Cross2Icon /> - </IconButton> - </Box> - </Flex> - </Box> - ); -}; diff --git a/refact-agent/gui/src/components/ChatForm/SuggestNewChat/index.ts b/refact-agent/gui/src/components/ChatForm/SuggestNewChat/index.ts deleted file mode 100644 index 6ea5a4a0c..000000000 --- a/refact-agent/gui/src/components/ChatForm/SuggestNewChat/index.ts +++ /dev/null @@ -1 +0,0 @@ -export { SuggestNewChat } from "./SuggestNewChat"; diff --git a/refact-agent/gui/src/components/ChatForm/ToolConfirmation.module.css b/refact-agent/gui/src/components/ChatForm/ToolConfirmation.module.css index d43e23677..01ab586eb 100644 --- a/refact-agent/gui/src/components/ChatForm/ToolConfirmation.module.css +++ b/refact-agent/gui/src/components/ChatForm/ToolConfirmation.module.css @@ -7,6 +7,9 @@ border-radius: 8px; background-color: var(--gray-1); flex-shrink: 0; /* this one should fix issue with shrinking */ + /* Force GPU compositing to fix JCEF repaint issues in JetBrains IDEs */ + transform: translateZ(0); + will-change: transform; & pre { margin: 0; } @@ -32,3 +35,17 @@ display: block; font-weight: 700; } + +.CacheGuardDiff { + composes: scrollbarThin from "../shared/scrollbar.module.css"; + width: 100%; + max-height: 360px; + overflow: auto; + padding: 10px; + border: 1px solid var(--gray-6); + border-radius: 6px; + background-color: var(--gray-2); + font-family: var(--default-font-family); + font-size: 12px; + white-space: pre; +} diff --git a/refact-agent/gui/src/components/ChatForm/ToolConfirmation.stories.tsx b/refact-agent/gui/src/components/ChatForm/ToolConfirmation.stories.tsx index 757c3c044..1ee6c4817 100644 --- a/refact-agent/gui/src/components/ChatForm/ToolConfirmation.stories.tsx +++ b/refact-agent/gui/src/components/ChatForm/ToolConfirmation.stories.tsx @@ -16,16 +16,7 @@ import { const MockedStore: React.FC<{ pauseReasons: ToolConfirmationPauseReason[]; }> = ({ pauseReasons }) => { - const store = setUpStore({ - confirmation: { - pauseReasons, - pause: true, - status: { - wasInteracted: false, - confirmationStatus: false, - }, - }, - }); + const store = setUpStore(); return ( <Provider store={store}> diff --git a/refact-agent/gui/src/components/ChatForm/ToolConfirmation.tsx b/refact-agent/gui/src/components/ChatForm/ToolConfirmation.tsx index a350218e9..88dba6f87 100644 --- a/refact-agent/gui/src/components/ChatForm/ToolConfirmation.tsx +++ b/refact-agent/gui/src/components/ChatForm/ToolConfirmation.tsx @@ -1,11 +1,5 @@ -import React, { useCallback, useMemo } from "react"; -import { - PATCH_LIKE_FUNCTIONS, - useAppDispatch, - useAppSelector, - useSendChatRequest, - // useEventsBusForIDE -} from "../../hooks"; +import React, { useCallback, useMemo, useState } from "react"; +import { useAppDispatch, useAppSelector, useChatActions } from "../../hooks"; import { Card, Button, Text, Flex } from "@radix-ui/themes"; import { Markdown } from "../Markdown"; import { Link } from "../Link"; @@ -14,105 +8,230 @@ import { push } from "../../features/Pages/pagesSlice"; import { isAssistantMessage, ToolConfirmationPauseReason, + ToolCall, } from "../../services/refact"; import { selectChatId, selectMessages, - setAutomaticPatch, + setAutoApproveEditingTools, } from "../../features/Chat"; +import { PATCH_LIKE_FUNCTIONS } from "./constants"; type ToolConfirmationProps = { pauseReasons: ToolConfirmationPauseReason[]; }; const 
getConfirmationMessage = ( - commands: string[], + toolNames: string[], rules: string[], types: string[], - confirmationCommands: string[], - denialCommands: string[], + confirmationToolNames: string[], + denialToolNames: string[], ) => { - const ruleText = `${rules.join(", ")}`; + const normalizedRules = rules.filter((r) => r.trim().length > 0); + const ruleText = normalizedRules.map((r) => `\`${r}\``).join(", "); + const ruleClause = + normalizedRules.length > 0 + ? ` due to ${ruleText} ${normalizedRules.length > 1 ? "rules" : "rule"}` + : ""; + if (types.every((type) => type === "confirmation")) { return `${ - commands.length > 1 ? "Commands need" : "Command needs" - } confirmation due to \`\`\`${ruleText}\`\`\` ${ - rules.length > 1 ? "rules" : "rule" - }.`; + toolNames.length > 1 ? "Commands need" : "Command needs" + } confirmation${ruleClause}.`; } else if (types.every((type) => type === "denial")) { return `${ - commands.length > 1 ? "Commands were" : "Command was" - } denied due to \`\`\`${ruleText}\`\`\` ${ - rules.length > 1 ? "rules" : "rule" - }.`; + toolNames.length > 1 ? "Commands were" : "Command was" + } denied${ruleClause}.`; } else { return `${ - confirmationCommands.length > 1 ? "Commands need" : "Command needs" - } confirmation: ${confirmationCommands.join(", ")}.\n\nFollowing ${ - denialCommands.length > 1 ? "commands were" : "command was" - } denied: ${denialCommands.join( - ", ", - )}.\n\nAll due to \`\`\`${ruleText}\`\`\` ${ - rules.length > 1 ? "rules" : "rule" - }.`; + confirmationToolNames.length > 1 ? "Commands need" : "Command needs" + } confirmation: ${confirmationToolNames.join(", ")}.\n\nFollowing ${ + denialToolNames.length > 1 ? "commands were" : "command was" + } denied: ${denialToolNames.join(", ")}.${ + ruleClause ? `\n\nAll${ruleClause}.` : "" + }`; } }; +type ResolvedPauseReason = { + tool_call_id: string; + type: string; + rawType?: string; + toolName: string; + command: string; + rule: string; + integr_config_path: string | null; +}; + +function isCacheGuardReason(reason: ToolConfirmationPauseReason): boolean { + return ( + reason.tool_name === "cache_guard" || + reason.tool_call_id.startsWith("cacheguard_") + ); +} + +function extractCacheGuardDiff(command: string): string { + const fenceStart = command.indexOf("```diff"); + if (fenceStart >= 0) { + const start = fenceStart + "```diff".length; + const fenceEnd = command.indexOf("```", start); + if (fenceEnd > start) { + return command.slice(start, fenceEnd).trim(); + } + } + return command; +} + +function extractEstimatedUsd(command: string): string | null { + const match = command.match(/`\$([0-9]+(?:\.[0-9]+)?)`\s*USD/); + return match?.[1] ?? 
null; +} + export const ToolConfirmation: React.FC<ToolConfirmationProps> = ({ pauseReasons, }) => { const dispatch = useAppDispatch(); - + const messages = useAppSelector(selectMessages); const chatId = useAppSelector(selectChatId); - const commands = pauseReasons.map((reason) => reason.command); - const rules = pauseReasons.map((reason) => reason.rule); - const types = pauseReasons.map((reason) => reason.type); - const toolCallIds = pauseReasons.map((reason) => reason.tool_call_id); + const toolCallsById = useMemo(() => { + const map = new Map<string, ToolCall>(); + for (const m of messages) { + if (!isAssistantMessage(m) || !m.tool_calls) continue; + for (const tc of m.tool_calls) { + if (tc.id) map.set(tc.id, tc); + } + } + return map; + }, [messages]); - const isPatchConfirmation = commands.some((command) => - PATCH_LIKE_FUNCTIONS.includes(command), + const resolvedReasons = useMemo((): ResolvedPauseReason[] => { + return pauseReasons.map((r) => { + let toolName = + r.tool_name || toolCallsById.get(r.tool_call_id)?.function.name; + if (!toolName) { + const cmd = r.command.trim(); + if (cmd) { + const firstWord = cmd.split(/\s+/)[0]; + if (firstWord && /^[a-z_]+$/.test(firstWord)) { + toolName = firstWord; + } + } + } + return { + tool_call_id: r.tool_call_id, + type: r.type, + rawType: r.raw_type, + toolName: toolName ?? "unknown", + command: r.command, + rule: r.rule, + integr_config_path: r.integr_config_path, + }; + }); + }, [pauseReasons, toolCallsById]); + + const toolCallIds = useMemo( + () => [...new Set(resolvedReasons.map((r) => r.tool_call_id))], + [resolvedReasons], ); + const toolNames = resolvedReasons.map((r) => r.toolName); + const types = resolvedReasons.map((r) => r.type); + const rules = [...new Set(resolvedReasons.map((r) => r.rule))]; - const integrationPaths = pauseReasons.map( - (reason) => reason.integr_config_path, + const isPatchConfirmation = resolvedReasons.every((r) => + PATCH_LIKE_FUNCTIONS.includes(r.toolName), ); - // assuming that at least one path out of all objects is not null so we can show up the link - const maybeIntegrationPath = integrationPaths.find((path) => path !== null); + const maybeIntegrationPath = resolvedReasons.find( + (r) => r.integr_config_path !== null, + )?.integr_config_path; - const allConfirmation = types.every((type) => type === "confirmation"); - const confirmationCommands = commands.filter( - (_, i) => types[i] === "confirmation", + const allConfirmation = resolvedReasons.every( + (r) => r.type === "confirmation", ); - const denialCommands = commands.filter((_, i) => types[i] === "denial"); + const isCacheGuardConfirmation = + pauseReasons.length > 0 && pauseReasons.every(isCacheGuardReason); + const confirmationToolNames = resolvedReasons + .filter((r) => r.type === "confirmation") + .map((r) => r.toolName); + const denialToolNames = resolvedReasons + .filter((r) => r.type === "denial") + .map((r) => r.toolName); + + const { respondToTools } = useChatActions(); - const { rejectToolUsage, confirmToolUsage } = useSendChatRequest(); + const confirmToolUsage = useCallback(() => { + const decisions = toolCallIds.map((id) => ({ + tool_call_id: id, + accepted: true, + })); + void respondToTools(decisions); + }, [respondToTools, toolCallIds]); - const handleAllowForThisChat = () => { - dispatch(setAutomaticPatch({ chatId, value: true })); - confirmToolUsage(); - }; + const rejectToolUsage = useCallback(() => { + const decisions = toolCallIds.map((id) => ({ + tool_call_id: id, + accepted: false, + })); + void 
respondToTools(decisions); + }, [respondToTools, toolCallIds]); + + const [isSettingAutoApprove, setIsSettingAutoApprove] = useState(false); + + const handleAllowForThisChat = useCallback(async () => { + setIsSettingAutoApprove(true); + try { + const { sendChatCommand } = await import( + "../../services/refact/chatCommands" + ); + const state = (await import("../../app/store")).store.getState(); + const port = state.config.lspPort; + const apiKey = state.config.apiKey; + if (port && chatId) { + await sendChatCommand(chatId, port, apiKey ?? undefined, { + type: "set_params", + patch: { auto_approve_editing_tools: true }, + }); + } + dispatch(setAutoApproveEditingTools({ chatId, value: true })); + confirmToolUsage(); + } finally { + setIsSettingAutoApprove(false); + } + }, [dispatch, chatId, confirmToolUsage]); const handleReject = useCallback(() => { - rejectToolUsage(toolCallIds); - }, [rejectToolUsage, toolCallIds]); + rejectToolUsage(); + }, [rejectToolUsage]); const message = getConfirmationMessage( - commands, + toolNames, rules, types, - confirmationCommands, - denialCommands, + confirmationToolNames, + denialToolNames, ); - if (isPatchConfirmation) { - // TODO: think of multiple toolcalls support + if (isCacheGuardConfirmation) { + return ( + <CacheGuardConfirmation + pauseReasons={pauseReasons} + confirmToolUsage={confirmToolUsage} + rejectToolUsage={handleReject} + /> + ); + } + + if (isPatchConfirmation && allConfirmation) { return ( <PatchConfirmation + pauseReasons={pauseReasons} + toolCallsById={toolCallsById} handleAllowForThisChat={handleAllowForThisChat} rejectToolUsage={handleReject} confirmToolUsage={confirmToolUsage} + isSettingAutoApprove={isSettingAutoApprove} /> ); } @@ -135,10 +254,21 @@ export const ToolConfirmation: React.FC<ToolConfirmationProps> = ({ <Text as="span">⚠️</Text> <Text>Model {allConfirmation ? "wants" : "tried"} to run:</Text> </Flex> - {commands.map((command, i) => ( - <Markdown - key={toolCallIds[i]} - >{`${"```bash\n"}${command}${"\n```"}`}</Markdown> + {resolvedReasons.map((r) => ( + <Flex key={r.tool_call_id} direction="column" gap="1"> + <Markdown>{`\`${r.toolName}\``}</Markdown> + {r.command && r.command !== r.toolName && ( + <Text + size="1" + color="gray" + style={{ fontFamily: "monospace", wordBreak: "break-all" }} + > + {r.command.length > 200 + ? r.command.slice(0, 200) + "..." 
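+ // Truncate long raw commands to the first 200 characters for display.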
+ : r.command} + </Text> + )} + </Flex> ))} <Text className={styles.ToolConfirmationText}> <Markdown color="indigo">{message.concat("\n\n")}</Markdown> @@ -188,38 +318,104 @@ export const ToolConfirmation: React.FC<ToolConfirmationProps> = ({ ); }; +type CacheGuardConfirmationProps = { + pauseReasons: ToolConfirmationPauseReason[]; + rejectToolUsage: () => void; + confirmToolUsage: () => void; +}; + +const CacheGuardConfirmation: React.FC<CacheGuardConfirmationProps> = ({ + pauseReasons, + rejectToolUsage, + confirmToolUsage, +}) => { + const details = pauseReasons[0].command; + const diff = extractCacheGuardDiff(details); + const estimatedUsd = extractEstimatedUsd(details); + + return ( + <Card className={styles.ToolConfirmationCard}> + <Flex direction="column" gap="3"> + <Flex + align="baseline" + gap="1" + className={styles.ToolConfirmationHeading} + > + <Text as="span">⚠️</Text> + <Text>Prompt cache may be broken</Text> + </Flex> + + {estimatedUsd && ( + <Text className={styles.ToolConfirmationText}> + Estimated extra cost: <strong>${estimatedUsd} USD</strong> + </Text> + )} + + <Text className={styles.ToolConfirmationText}> + Force will allow this request once and refresh cache snapshot. + </Text> + + <pre className={styles.CacheGuardDiff}>{diff}</pre> + + <Flex align="end" justify="start" gap="2" direction="row"> + <Button + color="grass" + variant="surface" + size="1" + onClick={confirmToolUsage} + > + Force and Continue + </Button> + <Button + color="red" + variant="surface" + size="1" + onClick={rejectToolUsage} + > + Stop + </Button> + </Flex> + </Flex> + </Card> + ); +}; + type PatchConfirmationProps = { - handleAllowForThisChat: () => void; + pauseReasons: ToolConfirmationPauseReason[]; + toolCallsById: Map<string, ToolCall>; + handleAllowForThisChat: () => Promise<void>; rejectToolUsage: () => void; confirmToolUsage: () => void; + isSettingAutoApprove?: boolean; }; const PatchConfirmation: React.FC<PatchConfirmationProps> = ({ + pauseReasons, + toolCallsById, handleAllowForThisChat, confirmToolUsage, rejectToolUsage, + isSettingAutoApprove, }) => { - const messages = useAppSelector(selectMessages); - const assistantMessages = messages.filter(isAssistantMessage); - const lastAssistantMessage = useMemo( - () => assistantMessages[assistantMessages.length - 1], - [assistantMessages], - ); - const toolCalls = lastAssistantMessage.tool_calls; - - if (!toolCalls) return; - - const parsedArgsFromToolCall = JSON.parse( - toolCalls[0].function.arguments, - ) as { - path: string; - tickets: string; - }; - const extractedFileNameFromPath = - parsedArgsFromToolCall.path.split(/[/\\]/)[ - parsedArgsFromToolCall.path.split(/[/\\]/).length - 1 - ]; - const messageForPatch = "Patch " + "`" + extractedFileNameFromPath + "`"; + const messageForPatch = useMemo(() => { + const filenames: string[] = []; + for (const reason of pauseReasons) { + const tc = toolCallsById.get(reason.tool_call_id); + if (!tc) continue; + try { + const parsed = JSON.parse(tc.function.arguments) as { path?: string }; + if (parsed.path) { + const parts = parsed.path.split(/[/\\]/); + filenames.push(parts[parts.length - 1]); + } + } catch { + continue; + } + } + if (filenames.length === 0) return "Apply changes"; + const uniqueFilenames = [...new Set(filenames)]; + return `Patch ${uniqueFilenames.map((f) => `\`${f}\``).join(", ")}`; + }, [pauseReasons, toolCallsById]); return ( <Card className={styles.ToolConfirmationCard}> @@ -249,9 +445,10 @@ const PatchConfirmation: React.FC<PatchConfirmationProps> = ({ color="grass" 
variant="surface" size="1" - onClick={handleAllowForThisChat} + onClick={() => void handleAllowForThisChat()} + disabled={isSettingAutoApprove} > - Allow for This Chat + {isSettingAutoApprove ? "Setting..." : "Allow for This Chat"} </Button> <Button color="grass" diff --git a/refact-agent/gui/src/components/ChatForm/ToolUseSwitch.tsx b/refact-agent/gui/src/components/ChatForm/ToolUseSwitch.tsx deleted file mode 100644 index 9b0a7de60..000000000 --- a/refact-agent/gui/src/components/ChatForm/ToolUseSwitch.tsx +++ /dev/null @@ -1,63 +0,0 @@ -import React from "react"; -import { Flex, SegmentedControl, Text, HoverCard } from "@radix-ui/themes"; -import { ToolUse } from "../../features/Chat/Thread"; -import { QuestionMarkCircledIcon } from "@radix-ui/react-icons"; - -type ToolUseSwitchProps = { - toolUse: ToolUse; - setToolUse: (toolUse: ToolUse) => void; -}; - -export const ToolUseSwitch = React.forwardRef< - HTMLDivElement, - ToolUseSwitchProps ->(({ toolUse, setToolUse }, ref) => { - return ( - <Flex direction="column" gap="3" mb="2" align="start" ref={ref}> - <Text size="2">How fast do you want the answer:</Text> - <Flex direction="row" gap="1" align="center"> - <SegmentedControl.Root - defaultValue="quick" - value={toolUse} - onValueChange={(x) => { - setToolUse(x as ToolUse); - }} - > - <SegmentedControl.Item value="quick">Quick</SegmentedControl.Item> - <SegmentedControl.Item value="explore">Explore</SegmentedControl.Item> - <SegmentedControl.Item value="agent">Agent</SegmentedControl.Item> - </SegmentedControl.Root> - <HoverCard.Root> - <HoverCard.Trigger> - <QuestionMarkCircledIcon style={{ marginLeft: 4 }} /> - </HoverCard.Trigger> - <HoverCard.Content size="2" maxWidth="280px"> - <Text weight="bold">Quick</Text> - <Text as="p" size="2"> - The model doesn&apos;t have access to any tools and answers - immediately. You still can provide context using @-commands, try - @help. - </Text> - <Text as="div" mt="2" weight="bold"> - Explore - </Text> - <Text as="p" size="2"> - The model has access to exploration tools and collects the - necessary context for you. - </Text> - <Text as="div" mt="2" weight="bold"> - Agent - </Text> - <Text as="p" size="2"> - The model has agent capabilities, might take a long time to - respond. For example it can provide a high-quality context to - solve a problem. 
- </Text> - </HoverCard.Content> - </HoverCard.Root> - </Flex> - </Flex> - ); -}); - -ToolUseSwitch.displayName = "ToolUseSwitch"; diff --git a/refact-agent/gui/src/components/ChatForm/UnifiedAttachmentsTray.module.css b/refact-agent/gui/src/components/ChatForm/UnifiedAttachmentsTray.module.css new file mode 100644 index 000000000..0e3d49e8f --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/UnifiedAttachmentsTray.module.css @@ -0,0 +1,4 @@ +.tray { + padding: 0; + margin-bottom: var(--space-2); +} diff --git a/refact-agent/gui/src/components/ChatForm/UnifiedAttachmentsTray.tsx b/refact-agent/gui/src/components/ChatForm/UnifiedAttachmentsTray.tsx new file mode 100644 index 000000000..e18af4a8d --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/UnifiedAttachmentsTray.tsx @@ -0,0 +1,149 @@ +import React, { useMemo } from "react"; +import { Flex } from "@radix-ui/themes"; +import { AttachmentTile, AttachmentTileProps } from "./AttachmentTile"; +import { useAttachedImages } from "../../hooks/useAttachedImages"; +import { useAttachedFiles } from "./useCheckBoxes"; +import { ChatContextFile } from "../../services/refact"; +import styles from "./UnifiedAttachmentsTray.module.css"; + +type UnifiedAttachmentsTrayProps = { + attachedFiles: ReturnType<typeof useAttachedFiles>; + previewFiles?: (ChatContextFile | string)[]; + onOpenFile?: (file: { + file_path: string; + line?: number; + }) => void | Promise<void>; +}; + +function getFilename(path: string): string { + const parts = path.split(/[/\\]/); + return parts[parts.length - 1] || path; +} + +function formatLineRange( + line1: number | null | undefined, + line2: number | null | undefined, +): string { + const hasLine1 = typeof line1 === "number" && line1 > 0; + const hasLine2 = typeof line2 === "number" && line2 > 0; + if (hasLine1 && hasLine2) return `:${line1}-${line2}`; + if (hasLine1) return `:${line1}`; + return ""; +} + +function normalizeLine(line: number | null | undefined): number | undefined { + return typeof line === "number" && line > 0 ? line : undefined; +} + +export const UnifiedAttachmentsTray: React.FC<UnifiedAttachmentsTrayProps> = ({ + attachedFiles, + previewFiles, + onOpenFile, +}) => { + const { images, removeImage, textFiles, removeTextFile } = + useAttachedImages(); + + const items = useMemo(() => { + const result: AttachmentTileProps[] = []; + // Track added file paths to avoid duplicates between attachedFiles and previewFiles + const addedFilePaths = new Set<string>(); + + images.forEach((image, index) => { + if (typeof image.content === "string") { + result.push({ + kind: "image", + id: `image-${image.name}-${index}`, + name: image.name, + src: image.content, + onRemove: () => removeImage(index), + }); + } + }); + + textFiles.forEach((file, index) => { + result.push({ + kind: "file", + id: `textfile-${file.name}-${index}`, + name: file.name, + copyText: file.name, + onRemove: () => removeTextFile(index), + }); + }); + + attachedFiles.files.forEach((file, index) => { + const lineRange = formatLineRange(file.line1, file.line2); + addedFilePaths.add(file.path); + result.push({ + kind: "file", + id: `attached-${file.path}-${index}`, + name: getFilename(file.path), + copyText: `@file ${file.path}${lineRange}`, + subtitle: lineRange || undefined, + onRemove: () => attachedFiles.removeFile(file), + onOpen: onOpenFile + ? 
() => + onOpenFile({ + file_path: file.path, + line: normalizeLine(file.line1), + }) + : undefined, + }); + }); + + if (previewFiles) { + previewFiles.forEach((file, index) => { + if (typeof file === "string") { + result.push({ + kind: "plain-text", + id: `plain-text-${index}`, + label: "plain text", + preview: file, + copyText: file, + }); + } else { + // Skip if this file was already added from attachedFiles + if (addedFilePaths.has(file.file_name)) { + return; + } + const lineRange = formatLineRange(file.line1, file.line2); + result.push({ + kind: "file", + id: `preview-${file.file_name}-${index}`, + name: getFilename(file.file_name), + copyText: `@file ${file.file_name}${lineRange}`, + subtitle: lineRange || undefined, + onOpen: onOpenFile + ? () => + onOpenFile({ + file_path: file.file_name, + line: normalizeLine(file.line1), + }) + : undefined, + }); + } + }); + } + + return result; + }, [ + images, + textFiles, + attachedFiles, + previewFiles, + removeImage, + removeTextFile, + onOpenFile, + ]); + + if (items.length === 0) { + return null; + } + + return ( + <Flex wrap="wrap" gap="2" className={styles.tray}> + {items.map((item) => ( + <AttachmentTile key={item.id} {...item} /> + ))} + </Flex> + ); +}; diff --git a/refact-agent/gui/src/components/ChatForm/constants.ts b/refact-agent/gui/src/components/ChatForm/constants.ts new file mode 100644 index 000000000..3c142719a --- /dev/null +++ b/refact-agent/gui/src/components/ChatForm/constants.ts @@ -0,0 +1,12 @@ +export const PATCH_LIKE_FUNCTIONS = [ + "patch", + "text_edit", + "create_textdoc", + "update_textdoc", + "replace_textdoc", + "update_textdoc_regex", + "update_textdoc_by_lines", + "update_textdoc_anchored", + "apply_patch", + "undo_textdoc", +]; diff --git a/refact-agent/gui/src/components/ChatForm/useCheckBoxes.ts b/refact-agent/gui/src/components/ChatForm/useCheckBoxes.ts index aa5147e07..68d4280a5 100644 --- a/refact-agent/gui/src/components/ChatForm/useCheckBoxes.ts +++ b/refact-agent/gui/src/components/ChatForm/useCheckBoxes.ts @@ -2,12 +2,29 @@ import { useState, useMemo, useCallback, useEffect } from "react"; import { selectSelectedSnippet } from "../../features/Chat/selectedSnippet"; import { FileInfo, selectActiveFile } from "../../features/Chat/activeFile"; import { useConfig, useAppSelector } from "../../hooks"; -import type { Checkbox } from "./ChatControls"; import { selectMessages } from "../../features/Chat/Thread/selectors"; import { createSelector } from "@reduxjs/toolkit"; import { filename } from "../../utils"; import { ideAttachFileToChat } from "../../hooks"; +type CheckboxHelp = { + text: string; + link?: string; + linkText?: string; +}; + +export type Checkbox = { + name: string; + label: string; + checked: boolean; + value?: string; + disabled: boolean; + fileName?: string; + hide?: boolean; + info?: CheckboxHelp; + locked?: boolean; +}; + const messageLengthSelector = createSelector( [selectMessages], (messages) => messages.length, @@ -137,7 +154,7 @@ const useAttachSelectedSnippet = ( label: label, value: markdown, disabled: !snippet.code, - hide: host === "web", + hide: host === "web" || codeLineCount === 0, info: { text: "Adds the currently selected lines as a snippet for analysis or modification. 
Equivalent to code in triple backticks ``` in the text.", }, @@ -152,9 +169,9 @@ const useAttachSelectedSnippet = ( label: label, value: markdown, disabled: !snippet.code, - hide: host === "web", + hide: host === "web" || codeLineCount === 0, checked: !!snippet.code && !interacted, - locked: false, + locked: interacted ? prev.locked : false, }; }); } @@ -165,6 +182,7 @@ const useAttachSelectedSnippet = ( markdown, interacted, attachedSelectedSnippet.checked, + codeLineCount, ]); const onToggleAttachedSelectedSnippet = useCallback(() => { diff --git a/refact-agent/gui/src/components/ChatForm/useCommandCompletionAndPreviewFiles.ts b/refact-agent/gui/src/components/ChatForm/useCommandCompletionAndPreviewFiles.ts index c316ab29f..93c0ba495 100644 --- a/refact-agent/gui/src/components/ChatForm/useCommandCompletionAndPreviewFiles.ts +++ b/refact-agent/gui/src/components/ChatForm/useCommandCompletionAndPreviewFiles.ts @@ -1,20 +1,25 @@ import { useState, useEffect, useMemo, useCallback } from "react"; import { useDebounceCallback } from "usehooks-ts"; import { Checkboxes } from "./useCheckBoxes"; -import { useAppSelector, useHasCaps, useSendChatRequest } from "../../hooks"; +import { useAppSelector, useHasCaps } from "../../hooks"; import { addCheckboxValuesToInput } from "./utils"; import { type CommandCompletionResponse, commandsApi, } from "../../services/refact/commands"; -import { ChatContextFile, ChatMeta } from "../../services/refact/types"; +import { + ChatContextFile, + ChatMeta, + UserMessage, + UserMessageContentWithImage, +} from "../../services/refact/types"; import type { LspChatMessage } from "../../services/refact"; import { getSelectedChatModel, selectChatId, - selectIsStreaming, selectMessages, selectThreadMode, + selectThreadImages, } from "../../features/Chat"; import { formatMessagesForLsp } from "../../features/Chat/Thread/utils"; @@ -76,15 +81,35 @@ function useGetCommandPreviewQuery( query: string, ): (ChatContextFile | string)[] { const hasCaps = useHasCaps(); - const { maybeAddImagesToQuestion } = useSendChatRequest(); + const attachedImages = useAppSelector(selectThreadImages); const messages = useAppSelector(selectMessages); const chatId = useAppSelector(selectChatId); - const isStreaming = useAppSelector(selectIsStreaming); const currentThreadMode = useAppSelector(selectThreadMode); const currentModel = useAppSelector(getSelectedChatModel); - const userMessage = maybeAddImagesToQuestion(query); + const userMessage: UserMessage = useMemo(() => { + if (attachedImages.length === 0) { + return { role: "user", content: query, checkpoints: [] }; + } + + const images: UserMessageContentWithImage[] = attachedImages + .filter((img) => typeof img.content === "string") + .map((img) => ({ + type: "image_url" as const, + image_url: { url: img.content as string }, + })); + + if (images.length === 0) { + return { role: "user", content: query, checkpoints: [] }; + } + + return { + role: "user", + content: [...images, { type: "text" as const, text: query }], + checkpoints: [], + }; + }, [query, attachedImages]); const messagesToSend: LspChatMessage[] = formatMessagesForLsp([ ...messages, @@ -99,7 +124,7 @@ function useGetCommandPreviewQuery( const { data } = commandsApi.useGetCommandPreviewQuery( { messages: messagesToSend, meta: metaToSend, model: currentModel }, { - skip: !hasCaps || isStreaming, + skip: !hasCaps, }, ); diff --git a/refact-agent/gui/src/components/ChatForm/useInputValue.ts b/refact-agent/gui/src/components/ChatForm/useInputValue.ts index e55ad46bb..70b0da570 100644 
--- a/refact-agent/gui/src/components/ChatForm/useInputValue.ts +++ b/refact-agent/gui/src/components/ChatForm/useInputValue.ts @@ -1,12 +1,10 @@ -import { useCallback, useEffect, useState } from "react"; -import { - useAppDispatch, - useAppSelector, - useSendChatRequest, -} from "../../hooks"; +import React, { useCallback, useEffect } from "react"; +import { useAppDispatch, useAppSelector } from "../../hooks"; import { selectPages, change, ChatPage } from "../../features/Pages/pagesSlice"; import { setInputValue, addInputValue } from "./actions"; import { debugRefact } from "../../debugConfig"; +import { useDraftMessage } from "../../hooks/useDraftMessage"; +import { sendIdeMessagesToCurrentChat } from "../../features/Chat/Thread/actions"; export function useInputValue( uncheckCheckboxes: () => void, @@ -16,9 +14,9 @@ export function useInputValue( boolean, React.Dispatch<React.SetStateAction<boolean>>, ] { - const [value, setValue] = useState<string>(""); - const [isSendImmediately, setIsSendImmediately] = useState<boolean>(false); - const { submit } = useSendChatRequest(); + const { value, setValue } = useDraftMessage(); + const [isSendImmediately, setIsSendImmediately] = + React.useState<boolean>(false); const dispatch = useAppDispatch(); const pages = useAppSelector(selectPages); @@ -40,12 +38,15 @@ export function useInputValue( ); setUpIfNotReady(); - if (payload.messages) { + if (payload.messages && payload.messages.length > 0) { debugRefact(`[DEBUG]: payload messages: `, payload.messages); - setIsSendImmediately(true); - submit({ - maybeMessages: payload.messages, - }); + setIsSendImmediately(payload.send_immediately); + void dispatch( + sendIdeMessagesToCurrentChat({ + messages: payload.messages, + priority: payload.send_immediately, + }), + ); return; } } @@ -73,7 +74,7 @@ export function useInputValue( return; } }, - [setUpIfNotReady, submit, uncheckCheckboxes], + [setUpIfNotReady, dispatch, uncheckCheckboxes, setValue], ); useEffect(() => { diff --git a/refact-agent/gui/src/components/ChatHistory/ChatHistory.tsx b/refact-agent/gui/src/components/ChatHistory/ChatHistory.tsx index 1e5285499..88cccd24f 100644 --- a/refact-agent/gui/src/components/ChatHistory/ChatHistory.tsx +++ b/refact-agent/gui/src/components/ChatHistory/ChatHistory.tsx @@ -1,30 +1,411 @@ -import { memo } from "react"; -import { Flex, Box, Text } from "@radix-ui/themes"; +import { memo, useState, useCallback, useRef, useEffect, useMemo } from "react"; +import { Flex, Box, Text, Spinner, Button } from "@radix-ui/themes"; +import { ChatLoading } from "../ChatContent/ChatLoading"; import { ScrollArea } from "../ScrollArea"; import { HistoryItem } from "./HistoryItem"; +import { HistoryItemCompact } from "./HistoryItemCompact"; +import { TaskItemCompact } from "./TaskItemCompact"; import { ChatHistoryItem, - getHistory, - type HistoryState, + HistoryTreeNode, + buildHistoryTree, + isTaskChatLike, } from "../../features/History/historySlice"; +import type { TaskMeta } from "../../services/refact/tasks"; export type ChatHistoryProps = { - history: HistoryState; + history: Record<string, ChatHistoryItem>; + tasks?: TaskMeta[]; + isLoading?: boolean; onHistoryItemClick: (id: ChatHistoryItem) => void; onDeleteHistoryItem: (id: string) => void; + onRenameHistoryItem?: (id: string, newTitle: string) => void; onOpenChatInTab?: (id: string) => void; + onTaskClick?: (taskId: string) => void; + onDeleteTask?: (taskId: string) => void; + onRenameTask?: (taskId: string, newName: string) => void; currentChatId?: string; + 
treeView?: boolean; + compactView?: boolean; + onLoadMore?: () => void; + hasMore?: boolean; + isLoadingMore?: boolean; + loadMoreError?: string | null; + onRetryLoadMore?: () => void; + hasConnectionError?: boolean; + noScroll?: boolean; + scrollContainerRef?: React.RefObject<HTMLDivElement>; }; +type TreeNodeProps = { + node: HistoryTreeNode; + depth: number; + onHistoryItemClick: (id: ChatHistoryItem) => void; + onDeleteHistoryItem: (id: string) => void; + onRenameHistoryItem?: (id: string, newTitle: string) => void; + onOpenChatInTab?: (id: string) => void; + currentChatId?: string; + expandedIds: Set<string>; + onToggleExpand: (id: string) => void; + compactView?: boolean; +}; + +function getBadgeForNode( + node: HistoryTreeNode, + depth: number, +): string | undefined { + const isTask = !!node.task_id; + const linkType = node.link_type; + const isHandoffParent = depth > 0 && !linkType && !isTask; + + if (isTask) { + return node.task_role === "planner" + ? "Planner" + : node.task_role === "agents" + ? "Agent" + : undefined; + } + if (linkType === "subagent") return "Subagent"; + if (linkType === "handoff") return "Handoff"; + if (linkType === "mode_transition") return "Mode Switch"; + if (linkType === "branch") return "Branched"; + if (isHandoffParent) return "Original"; + return undefined; +} + +const TreeNode = memo( + ({ + node, + depth, + onHistoryItemClick, + onDeleteHistoryItem, + onRenameHistoryItem, + onOpenChatInTab, + currentChatId, + expandedIds, + onToggleExpand, + compactView = false, + }: TreeNodeProps) => { + const hasChildren = node.children.length > 0; + const isExpanded = expandedIds.has(node.id); + const badge = getBadgeForNode(node, depth); + + return ( + <Box style={{ width: "100%" }}> + {compactView ? ( + <HistoryItemCompact + historyItem={node} + onClick={() => onHistoryItemClick(node)} + onDelete={onDeleteHistoryItem} + onRename={onRenameHistoryItem} + disabled={node.id === currentChatId} + badge={badge} + childCount={hasChildren ? node.children.length : undefined} + isExpanded={isExpanded} + onToggleExpand={ + hasChildren ? () => onToggleExpand(node.id) : undefined + } + isChild={depth > 0} + /> + ) : ( + <Box style={{ paddingLeft: depth * 16 }}> + <HistoryItem + onClick={() => onHistoryItemClick(node)} + onOpenInTab={onOpenChatInTab} + onDelete={onDeleteHistoryItem} + historyItem={node} + disabled={node.id === currentChatId} + badge={badge} + childCount={hasChildren ? node.children.length : undefined} + isExpanded={isExpanded} + onToggleExpand={ + hasChildren ? 
() => onToggleExpand(node.id) : undefined + } + /> + </Box> + )} + {hasChildren && isExpanded && ( + <Flex direction="column" gap="1" pt="1"> + {node.children.map((child) => ( + <TreeNode + key={child.id} + node={child} + depth={depth + 1} + onHistoryItemClick={onHistoryItemClick} + onDeleteHistoryItem={onDeleteHistoryItem} + onRenameHistoryItem={onRenameHistoryItem} + onOpenChatInTab={onOpenChatInTab} + currentChatId={currentChatId} + expandedIds={expandedIds} + onToggleExpand={onToggleExpand} + compactView={compactView} + /> + ))} + </Flex> + )} + </Box> + ); + }, +); + +TreeNode.displayName = "TreeNode"; + +type UnifiedItem = + | { type: "chat"; item: ChatHistoryItem } + | { type: "tree"; item: HistoryTreeNode } + | { type: "task"; item: TaskMeta }; + +function getActiveTasks(tasks: TaskMeta[] = []): TaskMeta[] { + return tasks.filter( + (t) => + t.status === "active" || t.status === "planning" || t.status === "paused", + ); +} + +function getUpdatedAt(item: UnifiedItem): string { + switch (item.type) { + case "chat": + case "tree": + return item.item.updatedAt; + case "task": + return item.item.updated_at; + } +} + +function getSortedUnifiedList( + history: Record<string, ChatHistoryItem>, + tasks: TaskMeta[] = [], + useTree: boolean, + historyTree: HistoryTreeNode[], +): UnifiedItem[] { + const activeTasks = getActiveTasks(tasks); + + if (useTree) { + // In tree mode, merge tree root nodes with tasks + const treeItems: UnifiedItem[] = historyTree.map((item) => ({ + type: "tree" as const, + item, + })); + + const taskItems: UnifiedItem[] = activeTasks.map((item) => ({ + type: "task" as const, + item, + })); + + return [...treeItems, ...taskItems].sort((a, b) => + getUpdatedAt(b).localeCompare(getUpdatedAt(a)), + ); + } + + // In flat mode, merge chats with tasks + const chatItems: UnifiedItem[] = Object.values(history) + .filter((item) => !isTaskChatLike(item)) + .map((item) => ({ type: "chat" as const, item })); + + const taskItems: UnifiedItem[] = activeTasks.map((item) => ({ + type: "task" as const, + item, + })); + + return [...chatItems, ...taskItems].sort((a, b) => + getUpdatedAt(b).localeCompare(getUpdatedAt(a)), + ); +} + +function hasChildChatsInHistory( + history: Record<string, ChatHistoryItem>, +): boolean { + return Object.values(history).some((item) => !!item.parent_id); +} + export const ChatHistory = memo( ({ history, + tasks = [], onHistoryItemClick, onDeleteHistoryItem, + onRenameHistoryItem, onOpenChatInTab, + onTaskClick, + onDeleteTask, + onRenameTask, currentChatId, + treeView = false, + compactView = true, + isLoading = false, + onLoadMore, + hasMore = false, + isLoadingMore = false, + loadMoreError, + onRetryLoadMore, + hasConnectionError = false, + noScroll = false, + scrollContainerRef, }: ChatHistoryProps) => { - const sortedHistory = getHistory({ history }); + const historyTree = useMemo(() => buildHistoryTree(history), [history]); + const hasChildChats = useMemo( + () => hasChildChatsInHistory(history), + [history], + ); + const showTree = treeView || hasChildChats; + const unifiedList = useMemo( + () => getSortedUnifiedList(history, tasks, showTree, historyTree), + [history, tasks, showTree, historyTree], + ); + const [expandedIds, setExpandedIds] = useState<Set<string>>(new Set()); + const loadMoreRef = useRef<HTMLDivElement>(null); + + const handleToggleExpand = useCallback((id: string) => { + setExpandedIds((prev) => { + const next = new Set(prev); + if (next.has(id)) { + next.delete(id); + } else { + next.add(id); + } + return next; + }); + }, []); + + 
useEffect(() => { + if (!onLoadMore || !hasMore || isLoadingMore) return; + + const loadMoreElement = loadMoreRef.current; + if (!loadMoreElement) return; + + // Find the scroll container - either passed ref or use viewport + const root = scrollContainerRef?.current ?? null; + + const observer = new IntersectionObserver( + (entries) => { + if (entries[0]?.isIntersecting) { + onLoadMore(); + } + }, + { + threshold: 0.1, + root, + }, + ); + + observer.observe(loadMoreElement); + + return () => { + observer.disconnect(); + }; + }, [onLoadMore, hasMore, isLoadingMore, scrollContainerRef]); + + const content = ( + <Flex + justify="center" + align={unifiedList.length > 0 ? "center" : "start"} + pl="1" + pr="1" + gap="1" + direction="column" + > + {isLoading ? ( + <Box style={{ width: "100%" }}> + <ChatLoading /> + </Box> + ) : unifiedList.length !== 0 ? ( + <> + {unifiedList.map((unified) => { + if (unified.type === "task") { + return ( + <Box + key={`task-${unified.item.id}`} + style={{ width: "100%" }} + > + <TaskItemCompact + task={unified.item} + onClick={() => onTaskClick?.(unified.item.id)} + onDelete={(id) => onDeleteTask?.(id)} + onRename={onRenameTask} + badge="Task" + /> + </Box> + ); + } + if (unified.type === "tree") { + return ( + <TreeNode + key={unified.item.id} + node={unified.item} + depth={0} + onHistoryItemClick={onHistoryItemClick} + onDeleteHistoryItem={onDeleteHistoryItem} + onRenameHistoryItem={onRenameHistoryItem} + onOpenChatInTab={onOpenChatInTab} + currentChatId={currentChatId} + expandedIds={expandedIds} + onToggleExpand={handleToggleExpand} + compactView={compactView} + /> + ); + } + // type === "chat" + return compactView ? ( + <Box key={unified.item.id} style={{ width: "100%" }}> + <HistoryItemCompact + historyItem={unified.item} + onClick={() => onHistoryItemClick(unified.item)} + onDelete={onDeleteHistoryItem} + onRename={onRenameHistoryItem} + disabled={unified.item.id === currentChatId} + /> + </Box> + ) : ( + <Box key={unified.item.id} style={{ width: "100%" }}> + <HistoryItem + onClick={() => onHistoryItemClick(unified.item)} + onOpenInTab={onOpenChatInTab} + onDelete={onDeleteHistoryItem} + historyItem={unified.item} + disabled={unified.item.id === currentChatId} + /> + </Box> + ); + })} + {loadMoreError && onRetryLoadMore && ( + <Flex + py="2" + direction="column" + align="center" + gap="2" + style={{ width: "100%" }} + > + <Text size="1" color="red"> + {loadMoreError} + </Text> + <Button size="1" variant="soft" onClick={onRetryLoadMore}> + Retry + </Button> + </Flex> + )} + {hasMore && !loadMoreError && ( + <Box ref={loadMoreRef} py="2" style={{ width: "100%" }}> + {isLoadingMore ? ( + <Flex justify="center"> + <Spinner size="2" /> + </Flex> + ) : ( + <Box style={{ height: 1 }} /> + )} + </Box> + )} + </> + ) : ( + <Text size="2" color={hasConnectionError ? "red" : "gray"}> + {hasConnectionError ? "Unable to load" : "No chats yet"} + </Text> + )} + </Flex> + ); + + if (noScroll) { + return <Box pb="2">{content}</Box>; + } return ( <Box @@ -34,33 +415,7 @@ export const ChatHistory = memo( pb="2" flexGrow="1" > - <ScrollArea scrollbars="vertical"> - <Flex - justify="center" - align={sortedHistory.length > 0 ? "center" : "start"} - pl="2" - pr="2" - direction="column" - > - {sortedHistory.length !== 0 ? 
( - sortedHistory.map((item) => ( - <HistoryItem - onClick={() => onHistoryItemClick(item)} - onOpenInTab={onOpenChatInTab} - onDelete={onDeleteHistoryItem} - key={item.id} - historyItem={item} - disabled={item.id === currentChatId} - /> - )) - ) : ( - <Text as="p" size="2" mt="2"> - Your chat history is currently empty. Click &quot;New Chat&quot; - to start a conversation. - </Text> - )} - </Flex> - </ScrollArea> + <ScrollArea scrollbars="vertical">{content}</ScrollArea> </Box> ); }, diff --git a/refact-agent/gui/src/components/ChatHistory/CircularProgress.tsx b/refact-agent/gui/src/components/ChatHistory/CircularProgress.tsx new file mode 100644 index 000000000..578373d22 --- /dev/null +++ b/refact-agent/gui/src/components/ChatHistory/CircularProgress.tsx @@ -0,0 +1,72 @@ +import React from "react"; +import { HoverCard, Text } from "@radix-ui/themes"; + +export interface CircularProgressProps { + done: number; + total: number; + failed?: number; + size?: number; +} + +export const CircularProgress: React.FC<CircularProgressProps> = ({ + done, + total, + failed = 0, + size = 16, +}) => { + const hasError = failed > 0; + const progress = total > 0 ? done / total : 0; + const strokeWidth = 2; + const radius = (size - strokeWidth) / 2; + const circumference = 2 * Math.PI * radius; + const strokeDashoffset = circumference * (1 - progress); + + const progressColor = hasError ? "var(--red-9)" : "var(--green-9)"; + const trackColor = "var(--gray-6)"; + + const tooltip = hasError + ? `${done}/${total} completed, ${failed} failed` + : `${done}/${total} completed`; + + return ( + <HoverCard.Root openDelay={200} closeDelay={100}> + <HoverCard.Trigger> + <svg + width={size} + height={size} + viewBox={`0 0 ${size} ${size}`} + style={{ transform: "rotate(-90deg)", flexShrink: 0 }} + aria-label={tooltip} + > + {/* Background track */} + <circle + cx={size / 2} + cy={size / 2} + r={radius} + fill="none" + stroke={trackColor} + strokeWidth={strokeWidth} + /> + {/* Progress arc */} + <circle + cx={size / 2} + cy={size / 2} + r={radius} + fill="none" + stroke={progressColor} + strokeWidth={strokeWidth} + strokeDasharray={circumference} + strokeDashoffset={strokeDashoffset} + strokeLinecap="round" + style={{ transition: "stroke-dashoffset 0.3s ease" }} + /> + </svg> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top" align="center"> + <Text as="p" size="1"> + {tooltip} + </Text> + </HoverCard.Content> + </HoverCard.Root> + ); +}; diff --git a/refact-agent/gui/src/components/ChatHistory/HistoryItem.tsx b/refact-agent/gui/src/components/ChatHistory/HistoryItem.tsx index e5a6c9d38..ecac85b05 100644 --- a/refact-agent/gui/src/components/ChatHistory/HistoryItem.tsx +++ b/refact-agent/gui/src/components/ChatHistory/HistoryItem.tsx @@ -1,14 +1,24 @@ import React, { useMemo } from "react"; -import { Card, Flex, Text, Box, Spinner } from "@radix-ui/themes"; -import { ChatBubbleIcon, DotFilledIcon } from "@radix-ui/react-icons"; +import { Card, Flex, Text, Box, Spinner, Badge } from "@radix-ui/themes"; +import { + ChatBubbleIcon, + ChevronDownIcon, + ChevronRightIcon, + PauseIcon, + CrossCircledIcon, + CheckCircledIcon, +} from "@radix-ui/react-icons"; import { CloseButton } from "../Buttons/Buttons"; import { IconButton } from "@radix-ui/themes"; import { OpenInNewWindowIcon } from "@radix-ui/react-icons"; import type { ChatHistoryItem } from "../../features/History/historySlice"; -import { isUserMessage } from "../../services/refact"; -import { useAppSelector } from "../../hooks"; -import { 
getTotalCostMeteringForMessages } from "../../utils/getMetering"; +import { + getTotalCostMeteringForMessages, + getTotalUsdMeteringForMessages, + formatUsd, +} from "../../utils/getMetering"; import { Coin } from "../../images"; +import { getStatusFromSessionState } from "../../utils/sessionStatus"; export const HistoryItem: React.FC<{ historyItem: ChatHistoryItem; @@ -16,31 +26,50 @@ export const HistoryItem: React.FC<{ onDelete: (id: string) => void; onOpenInTab?: (id: string) => void; disabled: boolean; -}> = ({ historyItem, onClick, onDelete, onOpenInTab, disabled }) => { + badge?: string; + childCount?: number; + isExpanded?: boolean; + onToggleExpand?: () => void; +}> = ({ + historyItem, + onClick, + onDelete, + onOpenInTab, + disabled, + badge, + childCount, + isExpanded, + onToggleExpand, +}) => { const dateCreated = new Date(historyItem.createdAt); const dateTimeString = dateCreated.toLocaleString(); - const cache = useAppSelector((app) => app.chat.cache); - - const totalCost = useMemo(() => { + const totalCoins = useMemo(() => { const totals = getTotalCostMeteringForMessages(historyItem.messages); - if (totals === null) return null; - - return ( + const sum = totals.metering_coins_cache_creation + totals.metering_coins_cache_read + totals.metering_coins_generated + - totals.metering_coins_prompt - ); + totals.metering_coins_prompt; + return sum > 0 ? sum : null; }, [historyItem.messages]); - const isStreaming = historyItem.id in cache; + const totalUsd = useMemo(() => { + const usd = getTotalUsdMeteringForMessages(historyItem.messages); + if (usd === null || usd.total_usd <= 0) return null; + return usd.total_usd; + }, [historyItem.messages]); + + const statusState = getStatusFromSessionState(historyItem.session_state); + const isWorking = statusState === "in_progress"; + const isPaused = statusState === "needs_attention"; + const isError = statusState === "error"; + const isCompleted = statusState === "completed"; return ( <Box style={{ position: "relative", width: "100%" }}> <Card style={{ width: "100%", - marginBottom: "2px", opacity: disabled ? 0.8 : 1, }} variant="surface" @@ -56,10 +85,26 @@ export const HistoryItem: React.FC<{ onClick(); }} > - <Flex gap="2px" align="center"> - {isStreaming && <Spinner style={{ minWidth: 16, minHeight: 16 }} />} - {!isStreaming && historyItem.read === false && ( - <DotFilledIcon style={{ minWidth: 16, minHeight: 16 }} /> + <Flex gap="1" align="center"> + {isWorking && <Spinner style={{ minWidth: 16, minHeight: 16 }} />} + {!isWorking && isPaused && ( + <PauseIcon + style={{ + minWidth: 16, + minHeight: 16, + color: "var(--yellow-9)", + }} + /> + )} + {!isWorking && !isPaused && isError && ( + <CrossCircledIcon + style={{ minWidth: 16, minHeight: 16, color: "var(--red-9)" }} + /> + )} + {!isWorking && !isPaused && !isError && isCompleted && ( + <CheckCircledIcon + style={{ minWidth: 16, minHeight: 16, color: "var(--green-9)" }} + /> )} <Text as="div" @@ -73,6 +118,11 @@ export const HistoryItem: React.FC<{ > {historyItem.title} </Text> + {badge && ( + <Badge size="1" color="gray" variant="soft"> + {badge} + </Badge> + )} </Flex> <Flex justify="between" mt="8px"> @@ -81,18 +131,23 @@ export const HistoryItem: React.FC<{ size="1" style={{ display: "flex", gap: "4px", alignItems: "center" }} > - <ChatBubbleIcon />{" "} - {historyItem.messages.filter(isUserMessage).length} + <ChatBubbleIcon /> {historyItem.message_count ?? 0} </Text> - {totalCost ? 
( + {totalCoins !== null && totalUsd === null && ( <Text size="1" style={{ display: "flex", gap: "4px", alignItems: "center" }} > - <Coin width="15px" height="15px" /> {Math.round(totalCost)} + <Coin width="15px" height="15px" /> {Math.round(totalCoins)} + </Text> + )} + {totalUsd !== null && ( + <Text + size="1" + style={{ display: "flex", gap: "4px", alignItems: "center" }} + > + {formatUsd(totalUsd)} </Text> - ) : ( - false )} </Flex> @@ -101,6 +156,33 @@ export const HistoryItem: React.FC<{ </button> </Card> + {childCount !== undefined && onToggleExpand && ( + <Box + onClick={(e) => { + e.stopPropagation(); + onToggleExpand(); + }} + style={{ + cursor: "pointer", + padding: "4px 8px", + borderRadius: "0 0 4px 4px", + marginTop: "-2px", + background: "var(--gray-a3)", + }} + > + <Flex align="center" justify="center" gap="1"> + <Text size="1" color="gray"> + {childCount} related {childCount === 1 ? "chat" : "chats"} + </Text> + {isExpanded ? ( + <ChevronDownIcon width={12} height={12} /> + ) : ( + <ChevronRightIcon width={12} height={12} /> + )} + </Flex> + </Box> + )} + <Flex position="absolute" top="6px" @@ -108,9 +190,7 @@ export const HistoryItem: React.FC<{ gap="1" justify="end" align="center" - // justify to flex end > - {/**TODO: open in tab button */} {onOpenInTab && ( <IconButton size="1" @@ -128,7 +208,6 @@ export const HistoryItem: React.FC<{ <CloseButton size="1" - // needs to be smaller onClick={(event) => { event.preventDefault(); event.stopPropagation(); diff --git a/refact-agent/gui/src/components/ChatHistory/HistoryItemCompact.module.css b/refact-agent/gui/src/components/ChatHistory/HistoryItemCompact.module.css new file mode 100644 index 000000000..0bd9f9f1b --- /dev/null +++ b/refact-agent/gui/src/components/ChatHistory/HistoryItemCompact.module.css @@ -0,0 +1,238 @@ +/* Container query context - apply to parent wrapper */ +.itemContainer { + container-type: inline-size; +} + +.item { + position: relative; + display: grid; + /* Columns: [chevronArea] [dot+badge] [title] [stats] [date] [actions] */ + grid-template-columns: 20px auto 1fr auto auto auto; + align-items: center; + gap: var(--space-2); + padding: var(--space-2) var(--space-2) var(--space-2) var(--space-2); + border-radius: var(--radius-2); + background: var(--gray-a2); + cursor: pointer; + transition: background-color 0.15s ease; + width: 100%; + min-height: 32px; +} + +.item:hover { + background: var(--gray-a4); +} + +.item.disabled { + opacity: 0.7; + cursor: default; + background: var(--gray-a3); +} + +.chevronArea { + display: flex; + align-items: center; + justify-content: center; + width: 20px; + min-width: 20px; +} + +.leftSection { + display: flex; + align-items: center; + gap: var(--space-2); + flex-shrink: 0; +} + +.modeBadge { + flex-shrink: 0; + max-width: 100px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.titleSection { + display: flex; + align-items: center; + gap: var(--space-2); + min-width: 0; + overflow: hidden; +} + +.title { + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; + min-width: 60px; /* Ensure at least some title is visible */ +} + +.stats { + display: flex; + align-items: center; + gap: var(--space-1); + color: var(--gray-11); + flex-shrink: 0; +} + +.messagesCount { + display: contents; +} + +.coinsStats { + display: contents; +} + +.diffStats { + display: contents; +} + +.taskProgress { + display: contents; +} + +.statsSeparator { + width: 1px; + height: 10px; + background: var(--gray-a6); + margin: 0 var(--space-1); + 
flex-shrink: 0; +} + +.failedCount { + flex-shrink: 0; +} + +.linesAdded { + color: var(--green-11); + flex-shrink: 0; +} + +.linesRemoved { + color: var(--red-11); + flex-shrink: 0; +} + +.date { + min-width: 50px; + text-align: right; + flex-shrink: 0; +} + +.actions { + display: flex; + align-items: center; + gap: var(--space-2); + flex-shrink: 0; + min-width: 48px; +} + +.actionButton { + opacity: 0.5; + flex-shrink: 0; + transition: opacity 0.15s ease; +} + +.actionButton:hover { + opacity: 1; +} + +.item:hover .actionButton { + opacity: 0.7; +} + +.item:hover .actionButton:hover { + opacity: 1; +} + +.editInput { + flex: 1; + min-width: 0; +} + +.expandChevron { + display: flex; + align-items: center; + justify-content: center; + width: 16px; + height: 16px; + color: var(--gray-11); + cursor: pointer; + border-radius: var(--radius-1); + transition: + background-color 0.15s ease, + color 0.15s ease; +} + +.expandChevron:hover { + background: var(--gray-a4); + color: var(--gray-12); +} + +/* Child items - subtle visual distinction without width-breaking indentation */ +.childItem { + background: transparent; +} + +.childItem:hover { + background: var(--gray-a3); +} + +/* For touch devices, always show actions */ +@media (hover: none) { + .actions { + opacity: 1; + } +} + +@container (max-width: 650px) { + .diffStats { + display: none; + } +} + +@container (max-width: 550px) { + .messagesCount { + display: none; + } +} + +@container (max-width: 500px) { + .modeBadge { + display: none; + } +} + +@container (max-width: 450px) { + .coinsStats { + display: none; + } +} + +@container (max-width: 380px) { + .date { + display: none; + } +} + +@container (max-width: 320px) { + .taskProgress { + display: none; + } +} + +@container (max-width: 280px) { + .actions { + display: none; + } +} + +/* Extreme narrow - hide chevron area too */ +@container (max-width: 200px) { + .chevronArea { + display: none; + } + .item { + grid-template-columns: auto 1fr; + } +} diff --git a/refact-agent/gui/src/components/ChatHistory/HistoryItemCompact.tsx b/refact-agent/gui/src/components/ChatHistory/HistoryItemCompact.tsx new file mode 100644 index 000000000..cf486454e --- /dev/null +++ b/refact-agent/gui/src/components/ChatHistory/HistoryItemCompact.tsx @@ -0,0 +1,411 @@ +import React, { useState, useCallback } from "react"; +import { + Text, + IconButton, + TextField, + Badge, + HoverCard, +} from "@radix-ui/themes"; +import { + ChatBubbleIcon, + Pencil1Icon, + Cross1Icon, + CheckIcon, + ChevronDownIcon, + ChevronRightIcon, +} from "@radix-ui/react-icons"; +import { StatusDot } from "../StatusDot"; +import { Coin } from "../../images"; +import type { ChatHistoryItem } from "../../features/History/historySlice"; +import { + getStatusFromSessionState, + getStatusTooltip, +} from "../../utils/sessionStatus"; +import { CircularProgress } from "./CircularProgress"; +import { useGetChatModesQuery } from "../../services/refact/chatModes"; +import { getModeColor } from "../../utils/modeColors"; +import styles from "./HistoryItemCompact.module.css"; + +export interface HistoryItemCompactProps { + historyItem: ChatHistoryItem; + onClick: () => void; + onDelete: (id: string) => void; + onRename?: (id: string, newTitle: string) => void; + disabled: boolean; + badge?: string; + childCount?: number; + isExpanded?: boolean; + onToggleExpand?: () => void; + isChild?: boolean; +} + +function formatDateTime(dateString: string): string { + const date = new Date(dateString); + const now = new Date(); + const diffMs = now.getTime() - 
date.getTime(); + const diffDays = Math.floor(diffMs / (1000 * 60 * 60 * 24)); + + if (diffDays === 0) { + return date.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" }); + } + if (diffDays === 1) { + return "Yesterday"; + } + if (diffDays < 7) { + return date.toLocaleDateString([], { weekday: "short" }); + } + return date.toLocaleDateString([], { month: "short", day: "numeric" }); +} + +function formatCoins(coins: number): string { + if (coins >= 1000000) { + return `${(coins / 1000000).toFixed(1)}M`; + } + if (coins >= 1000) { + return `${(coins / 1000).toFixed(1)}K`; + } + if (coins >= 1) { + return coins.toFixed(0); + } + return coins.toFixed(2); +} + +interface TooltipButtonProps { + onClick: (e: React.MouseEvent) => void; + tooltip: string; + children: React.ReactNode; + className?: string; +} + +const TooltipButton: React.FC<TooltipButtonProps> = ({ + onClick, + tooltip, + children, + className, +}) => ( + <HoverCard.Root openDelay={200} closeDelay={100}> + <HoverCard.Trigger> + <IconButton + size="1" + variant="ghost" + onClick={onClick} + className={className} + aria-label={tooltip} + > + {children} + </IconButton> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top" align="center"> + <Text as="p" size="1"> + {tooltip} + </Text> + </HoverCard.Content> + </HoverCard.Root> +); + +export const HistoryItemCompact: React.FC<HistoryItemCompactProps> = ({ + historyItem, + onClick, + onDelete, + onRename, + disabled, + badge, + childCount, + isExpanded, + onToggleExpand, + isChild = false, +}) => { + const [isEditing, setIsEditing] = useState(false); + const [editValue, setEditValue] = useState(historyItem.title); + const { data: modesData } = useGetChatModesQuery(undefined); + const statusState = getStatusFromSessionState(historyItem.session_state); + const statusTooltip = getStatusTooltip(historyItem.session_state); + + const modeId = historyItem.mode; + const modeInfo = modesData?.modes.find((m) => m.id === modeId); + const modeTitle = modeInfo?.title ?? modeId; + const dateTimeString = formatDateTime(historyItem.updatedAt); + const messageCount = historyItem.message_count ?? historyItem.messages.length; + const totalCoins = historyItem.total_coins; + const linesAdded = historyItem.total_lines_added ?? 0; + const linesRemoved = historyItem.total_lines_removed ?? 0; + const hasLineChanges = linesAdded > 0 || linesRemoved > 0; + const hasChildren = childCount !== undefined && childCount > 0; + const taskProgress = + historyItem.tasks_total && historyItem.tasks_total > 0 + ? { + done: historyItem.tasks_done ?? 0, + total: historyItem.tasks_total, + failed: historyItem.tasks_failed ?? 
0, + } + : null; + + const handleStartEdit = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + setEditValue(historyItem.title); + setIsEditing(true); + }, + [historyItem.title], + ); + + const handleCancelEdit = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + setIsEditing(false); + setEditValue(historyItem.title); + }, + [historyItem.title], + ); + + const handleConfirmEdit = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + if (editValue.trim() && onRename) { + onRename(historyItem.id, editValue.trim()); + } + setIsEditing(false); + }, + [editValue, historyItem.id, onRename], + ); + + const handleKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + e.preventDefault(); + if (editValue.trim() && onRename) { + onRename(historyItem.id, editValue.trim()); + } + setIsEditing(false); + } else if (e.key === "Escape") { + setIsEditing(false); + setEditValue(historyItem.title); + } + }, + [editValue, historyItem.id, historyItem.title, onRename], + ); + + const handleDelete = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + onDelete(historyItem.id); + }, + [historyItem.id, onDelete], + ); + + const handleClick = useCallback(() => { + if (!isEditing && !disabled) { + onClick(); + } + }, [isEditing, disabled, onClick]); + + const handleRowKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.target !== e.currentTarget) return; + if (disabled) return; + if ((e.key === "Enter" || e.key === " ") && !isEditing) { + e.preventDefault(); + onClick(); + } + }, + [disabled, isEditing, onClick], + ); + + const handleToggleExpand = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + onToggleExpand?.(); + }, + [onToggleExpand], + ); + + const handleChevronKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === "Enter" || e.key === " ") { + e.preventDefault(); + e.stopPropagation(); + onToggleExpand?.(); + } + }, + [onToggleExpand], + ); + + const itemClasses = [ + styles.item, + disabled ? styles.disabled : "", + isChild ? styles.childItem : "", + ] + .filter(Boolean) + .join(" "); + + const chevronTooltip = `${childCount} related ${ + childCount === 1 ? "chat" : "chats" + }`; + + return ( + <div className={styles.itemContainer}> + <div + className={itemClasses} + onClick={handleClick} + role="button" + tabIndex={disabled ? -1 : 0} + onKeyDown={handleRowKeyDown} + > + <div className={styles.chevronArea}> + {hasChildren && onToggleExpand && ( + <HoverCard.Root openDelay={200} closeDelay={100}> + <HoverCard.Trigger> + <div + className={styles.expandChevron} + onClick={handleToggleExpand} + onKeyDown={handleChevronKeyDown} + role="button" + tabIndex={0} + aria-label={chevronTooltip} + aria-expanded={isExpanded} + > + {isExpanded ? 
( + <ChevronDownIcon width={14} height={14} /> + ) : ( + <ChevronRightIcon width={14} height={14} /> + )} + </div> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top" align="center"> + <Text as="p" size="1"> + {chevronTooltip} + </Text> + </HoverCard.Content> + </HoverCard.Root> + )} + </div> + + <div className={styles.leftSection}> + <StatusDot + state={statusState} + size="small" + tooltipText={statusTooltip} + /> + {modeTitle && modeTitle.toLowerCase() !== badge?.toLowerCase() && ( + <Badge + size="1" + color={getModeColor(modeId)} + variant="soft" + className={styles.modeBadge} + > + {modeTitle} + </Badge> + )} + {badge && ( + <Badge + size="1" + color="gray" + variant="soft" + style={{ flexShrink: 0 }} + > + {badge} + </Badge> + )} + </div> + + <div className={styles.titleSection}> + {isEditing ? ( + <TextField.Root + size="1" + value={editValue} + onChange={(e) => setEditValue(e.target.value)} + onKeyDown={handleKeyDown} + onClick={(e) => e.stopPropagation()} + autoFocus + className={styles.editInput} + /> + ) : ( + <Text as="span" size="2" weight="regular" className={styles.title}> + {historyItem.title} + </Text> + )} + </div> + + <div className={styles.stats}> + <span className={styles.messagesCount}> + <ChatBubbleIcon width={12} height={12} /> + <Text size="1" color="gray"> + {messageCount} + </Text> + </span> + {totalCoins !== undefined && totalCoins > 0 && ( + <span className={styles.coinsStats}> + <span className={styles.statsSeparator} /> + <Coin width={12} height={12} /> + <Text size="1" color="gray"> + {formatCoins(totalCoins)} + </Text> + </span> + )} + {hasLineChanges && ( + <span className={styles.diffStats}> + <span className={styles.statsSeparator} /> + <Text size="1" className={styles.linesAdded}> + +{linesAdded} + </Text> + <Text size="1" className={styles.linesRemoved}> + -{linesRemoved} + </Text> + </span> + )} + {taskProgress && ( + <span className={styles.taskProgress}> + <span className={styles.statsSeparator} /> + <CircularProgress + done={taskProgress.done} + total={taskProgress.total} + failed={taskProgress.failed} + /> + </span> + )} + </div> + + <Text size="1" color="gray" className={styles.date}> + {dateTimeString} + </Text> + + <div className={styles.actions}> + {isEditing ? 
( + <> + <TooltipButton onClick={handleConfirmEdit} tooltip="Save"> + <CheckIcon width={12} height={12} /> + </TooltipButton> + <TooltipButton onClick={handleCancelEdit} tooltip="Cancel"> + <Cross1Icon width={10} height={10} /> + </TooltipButton> + </> + ) : ( + <> + {onRename && ( + <TooltipButton + onClick={handleStartEdit} + tooltip="Rename" + className={styles.actionButton} + > + <Pencil1Icon width={12} height={12} /> + </TooltipButton> + )} + <TooltipButton + onClick={handleDelete} + tooltip="Delete" + className={styles.actionButton} + > + <Cross1Icon width={10} height={10} /> + </TooltipButton> + </> + )} + </div> + </div> + </div> + ); +}; diff --git a/refact-agent/gui/src/components/ChatHistory/TaskItemCompact.tsx b/refact-agent/gui/src/components/ChatHistory/TaskItemCompact.tsx new file mode 100644 index 000000000..9092ace7c --- /dev/null +++ b/refact-agent/gui/src/components/ChatHistory/TaskItemCompact.tsx @@ -0,0 +1,304 @@ +import React, { useState, useCallback } from "react"; +import { + Text, + IconButton, + TextField, + HoverCard, + Badge, +} from "@radix-ui/themes"; +import { Cross1Icon, Pencil1Icon, CheckIcon } from "@radix-ui/react-icons"; +import { StatusDot, StatusDotState } from "../StatusDot"; +import { CircularProgress } from "./CircularProgress"; +import type { TaskMeta } from "../../services/refact/tasks"; +import styles from "./HistoryItemCompact.module.css"; + +export interface TaskItemCompactProps { + task: TaskMeta; + onClick: () => void; + onDelete: (id: string) => void; + onRename?: (id: string, newName: string) => void; + badge?: string; +} + +function getTaskStatusDotState(task: TaskMeta): StatusDotState { + const plannerState = task.planner_session_state; + + if (plannerState === "generating" || plannerState === "executing_tools") { + return "in_progress"; + } + if (plannerState === "paused" || plannerState === "waiting_ide") { + return "needs_attention"; + } + if (plannerState === "error" || task.status === "abandoned") { + return "error"; + } + if (task.status === "completed") { + return "completed"; + } + if (task.agents_active > 0) { + return "in_progress"; + } + return "idle"; +} + +function getTaskTooltip(task: TaskMeta): string { + const plannerState = task.planner_session_state; + + if (plannerState === "generating" || plannerState === "executing_tools") { + return "Planner is working..."; + } + if (plannerState === "paused" || plannerState === "waiting_ide") { + return "Waiting for confirmation"; + } + if (plannerState === "error") { + return "Planner error"; + } + if (task.status === "abandoned") { + return "Task failed"; + } + if (task.status === "completed") { + return "Task completed"; + } + if (task.agents_active > 0) { + return `${task.agents_active} agent${ + task.agents_active > 1 ? 
"s" : "" + } working...`; + } + if (task.status === "paused") { + return "Task paused"; + } + return "Task active"; +} + +function formatDateTime(dateString: string): string { + const date = new Date(dateString); + const now = new Date(); + const diffMs = now.getTime() - date.getTime(); + const diffDays = Math.floor(diffMs / (1000 * 60 * 60 * 24)); + + if (diffDays === 0) { + return date.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" }); + } + if (diffDays === 1) { + return "Yesterday"; + } + if (diffDays < 7) { + return date.toLocaleDateString([], { weekday: "short" }); + } + return date.toLocaleDateString([], { month: "short", day: "numeric" }); +} + +interface TooltipButtonProps { + onClick: (e: React.MouseEvent) => void; + tooltip: string; + children: React.ReactNode; + className?: string; +} + +const TooltipButton: React.FC<TooltipButtonProps> = ({ + onClick, + tooltip, + children, + className, +}) => ( + <HoverCard.Root openDelay={200} closeDelay={100}> + <HoverCard.Trigger> + <IconButton + size="1" + variant="ghost" + onClick={onClick} + className={className} + aria-label={tooltip} + > + {children} + </IconButton> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top" align="center"> + <Text as="p" size="1"> + {tooltip} + </Text> + </HoverCard.Content> + </HoverCard.Root> +); + +export const TaskItemCompact: React.FC<TaskItemCompactProps> = ({ + task, + onClick, + onDelete, + onRename, + badge, +}) => { + const [isEditing, setIsEditing] = useState(false); + const [editValue, setEditValue] = useState(task.name); + const statusState = getTaskStatusDotState(task); + const tooltipText = getTaskTooltip(task); + const dateTimeString = formatDateTime(task.updated_at); + + const handleStartEdit = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + setEditValue(task.name); + setIsEditing(true); + }, + [task.name], + ); + + const handleCancelEdit = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + setIsEditing(false); + setEditValue(task.name); + }, + [task.name], + ); + + const handleConfirmEdit = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + if (editValue.trim() && onRename) { + onRename(task.id, editValue.trim()); + } + setIsEditing(false); + }, + [editValue, task.id, onRename], + ); + + const handleKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + e.preventDefault(); + if (editValue.trim() && onRename) { + onRename(task.id, editValue.trim()); + } + setIsEditing(false); + } else if (e.key === "Escape") { + setIsEditing(false); + setEditValue(task.name); + } + }, + [editValue, task.id, task.name, onRename], + ); + + const handleDelete = useCallback( + (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + onDelete(task.id); + }, + [task.id, onDelete], + ); + + const handleClick = useCallback(() => { + if (!isEditing) { + onClick(); + } + }, [isEditing, onClick]); + + const handleRowKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.target !== e.currentTarget) return; + if ((e.key === "Enter" || e.key === " ") && !isEditing) { + e.preventDefault(); + onClick(); + } + }, + [isEditing, onClick], + ); + + return ( + <div className={styles.itemContainer}> + <div + className={styles.item} + onClick={handleClick} + role="button" + tabIndex={0} + onKeyDown={handleRowKeyDown} + > + <div className={styles.chevronArea} /> + + <div className={styles.leftSection}> + <StatusDot + state={statusState} + 
size="small" + tooltipText={tooltipText} + /> + {badge && ( + <Badge + size="1" + color="gray" + variant="soft" + style={{ flexShrink: 0 }} + > + {badge} + </Badge> + )} + </div> + + <div className={styles.titleSection}> + {isEditing ? ( + <TextField.Root + size="1" + value={editValue} + onChange={(e) => setEditValue(e.target.value)} + onKeyDown={handleKeyDown} + onClick={(e) => e.stopPropagation()} + autoFocus + className={styles.editInput} + /> + ) : ( + <Text as="span" size="2" className={styles.title}> + {task.name} + </Text> + )} + </div> + + <div className={styles.stats}> + <CircularProgress + done={task.cards_done} + total={task.cards_total} + failed={task.cards_failed} + /> + </div> + + <Text size="1" color="gray" className={styles.date}> + {dateTimeString} + </Text> + + <div className={styles.actions}> + {isEditing ? ( + <> + <TooltipButton onClick={handleConfirmEdit} tooltip="Save"> + <CheckIcon width={12} height={12} /> + </TooltipButton> + <TooltipButton onClick={handleCancelEdit} tooltip="Cancel"> + <Cross1Icon width={10} height={10} /> + </TooltipButton> + </> + ) : ( + <> + {onRename && ( + <TooltipButton + onClick={handleStartEdit} + tooltip="Rename" + className={styles.actionButton} + > + <Pencil1Icon width={12} height={12} /> + </TooltipButton> + )} + <TooltipButton + onClick={handleDelete} + tooltip="Delete" + className={styles.actionButton} + > + <Cross1Icon width={10} height={10} /> + </TooltipButton> + </> + )} + </div> + </div> + </div> + ); +}; diff --git a/refact-agent/gui/src/components/ChatHistory/index.tsx b/refact-agent/gui/src/components/ChatHistory/index.tsx index 5ba8f205a..971e32e70 100644 --- a/refact-agent/gui/src/components/ChatHistory/index.tsx +++ b/refact-agent/gui/src/components/ChatHistory/index.tsx @@ -1 +1,7 @@ export { ChatHistory, type ChatHistoryProps } from "./ChatHistory"; +export { HistoryItem } from "./HistoryItem"; +export { + HistoryItemCompact, + type HistoryItemCompactProps, +} from "./HistoryItemCompact"; +export { TaskItemCompact, type TaskItemCompactProps } from "./TaskItemCompact"; diff --git a/refact-agent/gui/src/components/ChatLinks/UncommittedChangesWarning.tsx b/refact-agent/gui/src/components/ChatLinks/UncommittedChangesWarning.tsx index 75778251e..69c65860d 100644 --- a/refact-agent/gui/src/components/ChatLinks/UncommittedChangesWarning.tsx +++ b/refact-agent/gui/src/components/ChatLinks/UncommittedChangesWarning.tsx @@ -6,10 +6,11 @@ import { selectIsStreaming, selectIsWaiting, selectMessages, - selectThreadToolUse, + selectThreadMode, } from "../../features/Chat"; import { getErrorMessage } from "../../features/Errors/errorsSlice"; import { getInformationMessage } from "../../features/Errors/informationSlice"; +import { useGetChatModesQuery } from "../../services/refact/chatModes"; export const UncommittedChangesWarning: React.FC = () => { const isStreaming = useAppSelector(selectIsStreaming); @@ -17,15 +18,23 @@ export const UncommittedChangesWarning: React.FC = () => { const linksRequest = useGetLinksFromLsp(); const error = useAppSelector(getErrorMessage); const information = useAppSelector(getInformationMessage); - const toolUse = useAppSelector(selectThreadToolUse); + const currentMode = useAppSelector(selectThreadMode); const messages = useAppSelector(selectMessages); + const modesQuery = useGetChatModesQuery(undefined); + + const modeHasEditing = React.useMemo(() => { + if (!modesQuery.data?.modes) return false; + const modeInfo = modesQuery.data.modes.find((m) => m.id === currentMode); + if (!modeInfo) return 
currentMode === "agent"; + return modeInfo.ui.tags.includes("editing"); + }, [modesQuery.data?.modes, currentMode]); const hasCallout = React.useMemo(() => { return !!error || !!information; }, [error, information]); if ( - toolUse !== "agent" || + !modeHasEditing || messages.length !== 0 || hasCallout || isStreaming || diff --git a/refact-agent/gui/src/components/ChatRawJSON/ChatRawJSON.tsx b/refact-agent/gui/src/components/ChatRawJSON/ChatRawJSON.tsx index 675017438..7ca4127de 100644 --- a/refact-agent/gui/src/components/ChatRawJSON/ChatRawJSON.tsx +++ b/refact-agent/gui/src/components/ChatRawJSON/ChatRawJSON.tsx @@ -1,10 +1,9 @@ import { Box, Button, Flex, Heading } from "@radix-ui/themes"; import { ScrollArea } from "../ScrollArea"; -import { MarkdownCodeBlock } from "../Markdown/CodeBlock"; -import { ChatHistoryItem } from "../../events"; +import { ShikiCodeBlock } from "../Markdown/ShikiCodeBlock"; type ChatRawJSONProps = { - thread: ChatHistoryItem; + thread: { title?: string; [key: string]: unknown }; copyHandler: () => void; }; @@ -45,12 +44,12 @@ export const ChatRawJSON = ({ thread, copyHandler }: ChatRawJSONProps) => { > <ScrollArea scrollbars="horizontal" style={{ width: "100%" }} asChild> <Box> - <MarkdownCodeBlock - useInlineStyles={true} + <ShikiCodeBlock + className="language-json" preOptions={{ noMargin: true }} > {JSON.stringify(thread, null, 2)} - </MarkdownCodeBlock> + </ShikiCodeBlock> </Box> </ScrollArea> </Flex> diff --git a/refact-agent/gui/src/components/ComboBox/ComboBox.module.css b/refact-agent/gui/src/components/ComboBox/ComboBox.module.css index 153134458..ce9f34202 100644 --- a/refact-agent/gui/src/components/ComboBox/ComboBox.module.css +++ b/refact-agent/gui/src/components/ComboBox/ComboBox.module.css @@ -7,6 +7,9 @@ max-width: 50vw; max-width: 50dvw; border-radius: max(var(--radius-2), var(--radius-full)); + /* Force GPU compositing to fix JCEF repaint issues in JetBrains IDEs */ + transform: translateZ(0); + will-change: transform; } .popover__scroll { diff --git a/refact-agent/gui/src/components/ComboBox/ComboBox.test.tsx b/refact-agent/gui/src/components/ComboBox/ComboBox.test.tsx index 64636eb67..7f61243a3 100644 --- a/refact-agent/gui/src/components/ComboBox/ComboBox.test.tsx +++ b/refact-agent/gui/src/components/ComboBox/ComboBox.test.tsx @@ -245,7 +245,9 @@ describe("ComboBox", () => { test("type part of the command, then press enter", async () => { const { user, ...app } = render(<App />); const textarea = app.getByRole("combobox"); - await user.type(textarea, "@fi{Enter}"); + await user.type(textarea, "@fi"); + await pause(50); + await user.keyboard("{Enter}"); await waitFor(() => { expect(app.getByRole("combobox").textContent).toEqual("@file "); }); @@ -323,7 +325,9 @@ describe("ComboBox", () => { test("select command, type space and then delete the command", async () => { const { user, ...app } = render(<App />); const textarea = app.getByRole("combobox"); - await user.type(textarea, "@fi{Enter}"); + await user.type(textarea, "@fi"); + await pause(50); + await user.keyboard("{Enter}"); await waitFor(() => { expect(app.getByRole("combobox").textContent).toEqual("@file "); }); diff --git a/refact-agent/gui/src/components/Command/Markdown.tsx b/refact-agent/gui/src/components/Command/Markdown.tsx index 76abde8a0..06a2a580f 100644 --- a/refact-agent/gui/src/components/Command/Markdown.tsx +++ b/refact-agent/gui/src/components/Command/Markdown.tsx @@ -4,14 +4,8 @@ import ReactMarkdown, { type UrlTransform, } from "react-markdown"; import styles from 
"./Command.module.css"; -import { type SyntaxHighlighterProps } from "react-syntax-highlighter"; import classNames from "classnames"; -import type { Element } from "hast"; -import hljsStyle from "react-syntax-highlighter/dist/esm/styles/hljs/agate"; -import { - MarkdownCodeBlock, - type MarkdownCodeBlockProps, -} from "../Markdown/CodeBlock"; +import { ShikiCodeBlock } from "../Markdown/ShikiCodeBlock"; const dataUrlPattern = /^data:image\/(png|jpeg|gif|bmp|webp);base64,[A-Za-z0-9+/]+={0,2}$/; @@ -23,16 +17,11 @@ const urlTransform: UrlTransform = (value) => { return defaultUrlTransform(value); }; -type CodeBlockProps = React.JSX.IntrinsicElements["code"] & { - node?: Element | undefined; - style?: MarkdownCodeBlockProps["style"]; -} & Pick<SyntaxHighlighterProps, "showLineNumbers" | "startingLineNumber">; - export type MarkdownProps = { children: string; className?: string; isInsideScrollArea?: boolean; -} & Pick<CodeBlockProps, "showLineNumbers" | "startingLineNumber" | "style">; +}; const Image: React.FC< React.DetailedHTMLProps< @@ -47,7 +36,6 @@ export const Markdown: React.FC<MarkdownProps> = ({ children, className, isInsideScrollArea, - style = hljsStyle, }) => { return ( <ReactMarkdown @@ -57,12 +45,11 @@ export const Markdown: React.FC<MarkdownProps> = ({ })} components={{ code({ color: _color, ref: _ref, node: _node, ...props }) { - return <MarkdownCodeBlock {...props} style={style} />; + return <ShikiCodeBlock {...props} />; }, p({ color: _color, ref: _ref, node: _node, ...props }) { return <div {...props} />; }, - img({ color: _color, ref: _ref, node: _node, ...props }) { return <Image {...props} />; }, @@ -73,7 +60,7 @@ export const Markdown: React.FC<MarkdownProps> = ({ ); }; -export type CommandMarkdownProps = Omit<MarkdownProps, "style">; +export type CommandMarkdownProps = MarkdownProps; export const CommandMarkdown: React.FC<CommandMarkdownProps> = (props) => ( <Markdown {...props} /> ); diff --git a/refact-agent/gui/src/components/ConnectionStatus/ConnectionStatus.module.css b/refact-agent/gui/src/components/ConnectionStatus/ConnectionStatus.module.css new file mode 100644 index 000000000..a35d59bcb --- /dev/null +++ b/refact-agent/gui/src/components/ConnectionStatus/ConnectionStatus.module.css @@ -0,0 +1,91 @@ +.indicator { + padding: 0; +} + +.iconConnected { + color: var(--green-9); + width: 14px; + height: 14px; +} + +.iconDisconnected { + color: var(--red-9); + width: 14px; + height: 14px; +} + +.iconReconnecting { + color: var(--orange-9); + width: 14px; + height: 14px; + animation: spin 1s linear infinite; +} + +.reconnectingPulse { + animation: reconnectingPulse 1.5s ease-in-out infinite; +} + +@keyframes reconnectingPulse { + 0%, + 100% { + box-shadow: 0 0 0 0 rgba(255, 140, 0, 0.4); + } + 50% { + box-shadow: 0 0 0 4px rgba(255, 140, 0, 0); + } +} + +@keyframes spin { + from { + transform: rotate(0deg); + } + to { + transform: rotate(360deg); + } +} + +/* Unified toolbar button style */ +.statusButton { + display: flex; + align-items: center; + justify-content: center; + width: 34px; + height: 34px; + padding: 0; + background: transparent; + border: none; + border-radius: var(--radius-2); + cursor: pointer; + transition: background-color 0.15s ease; + flex-shrink: 0; +} + +.statusButton.statusConnected:hover { + background: rgba(48, 164, 108, 0.15); +} + +.statusButton.statusDisconnected:hover { + background: rgba(229, 72, 77, 0.15); +} + +.statusButton.statusReconnecting:hover { + background: rgba(255, 140, 0, 0.15); +} + +.statusButton.statusRefreshing:hover { 
+ background: var(--gray-a3); +} + +.statusButton:focus-visible { + outline: 2px solid var(--accent-8); + outline-offset: -2px; +} + +.statusButton:disabled { + cursor: default; + opacity: 0.5; +} + +.statusButton:disabled:hover { + background: transparent; +} diff --git a/refact-agent/gui/src/components/ConnectionStatus/ConnectionStatusIndicator.tsx b/refact-agent/gui/src/components/ConnectionStatus/ConnectionStatusIndicator.tsx new file mode 100644 index 000000000..dce04b51f --- /dev/null +++ b/refact-agent/gui/src/components/ConnectionStatus/ConnectionStatusIndicator.tsx @@ -0,0 +1,139 @@ +import React, { useState, useCallback } from "react"; +import { Flex, HoverCard, Spinner, Text } from "@radix-ui/themes"; +import { + CheckCircledIcon, + CrossCircledIcon, + UpdateIcon, +} from "@radix-ui/react-icons"; +import { useAppSelector } from "../../hooks/useAppSelector"; +import { useAppDispatch } from "../../hooks/useAppDispatch"; +import { + selectIsFullyConnected, + selectConnectionProblem, + selectBackendStatus, + selectCurrentChatSseStatus, +} from "../../features/Connection"; +import { requestSseRefresh } from "../../features/Chat/Thread/actions"; +import { selectCurrentThreadId } from "../../features/Chat/Thread/selectors"; +import { trajectoriesApi } from "../../services/refact/trajectories"; +import { tasksApi } from "../../services/refact/tasks"; +import { + hydrateHistoryFromMeta, + setPagination, +} from "../../features/History/historySlice"; +import styles from "./ConnectionStatus.module.css"; + +export const ConnectionStatusIndicator: React.FC = () => { + const dispatch = useAppDispatch(); + const isConnected = useAppSelector(selectIsFullyConnected); + const problem = useAppSelector(selectConnectionProblem); + const backendStatus = useAppSelector(selectBackendStatus); + const sseStatus = useAppSelector(selectCurrentChatSseStatus); + const currentThreadId = useAppSelector(selectCurrentThreadId); + const [isRefreshing, setIsRefreshing] = useState(false); + + const handleRefresh = useCallback(async () => { + setIsRefreshing(true); + const trajQuery = dispatch( + trajectoriesApi.endpoints.listTrajectoriesPaginated.initiate( + { limit: 50 }, + { forceRefetch: true }, + ), + ); + const tasksQuery = dispatch( + tasksApi.endpoints.listTasks.initiate(undefined, { + forceRefetch: true, + }), + ); + try { + if (currentThreadId) { + dispatch(requestSseRefresh({ chatId: currentThreadId })); + } + const trajectoriesResult = await trajQuery.unwrap(); + await tasksQuery.unwrap(); + dispatch(hydrateHistoryFromMeta(trajectoriesResult.items)); + dispatch( + setPagination({ + cursor: trajectoriesResult.next_cursor, + hasMore: trajectoriesResult.has_more, + }), + ); + } finally { + trajQuery.unsubscribe(); + tasksQuery.unsubscribe(); + setIsRefreshing(false); + } + }, [dispatch, currentThreadId]); + + const isReconnecting = + sseStatus === "connecting" || backendStatus === "unknown"; + + const getStatusClass = () => { + if (isRefreshing) return styles.statusRefreshing; + if (isConnected) return styles.statusConnected; + if (isReconnecting) return styles.statusReconnecting; + return styles.statusDisconnected; + }; + + if (isConnected) { + return ( + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + onClick={() => void handleRefresh()} + disabled={isRefreshing} + className={`${styles.statusButton} ${getStatusClass()}`} + > + <Flex align="center" gap="1" className={styles.indicator}> + {isRefreshing ? 
( + <Spinner size="1" /> + ) : ( + <CheckCircledIcon className={styles.iconConnected} /> + )} + </Flex> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="bottom"> + <Text as="p" size="2"> + Connected - Click to refresh + </Text> + </HoverCard.Content> + </HoverCard.Root> + ); + } + + return ( + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + onClick={() => void handleRefresh()} + disabled={isRefreshing || isReconnecting} + className={`${styles.statusButton} ${getStatusClass()} ${ + isReconnecting ? styles.reconnectingPulse : "" + }`} + > + <Flex align="center" className={styles.indicator}> + {isRefreshing ? ( + <Spinner size="1" /> + ) : isReconnecting ? ( + <UpdateIcon className={styles.iconReconnecting} /> + ) : ( + <CrossCircledIcon className={styles.iconDisconnected} /> + )} + </Flex> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="bottom"> + <Text as="p" size="2"> + {isReconnecting + ? "Reconnecting..." + : `${problem ?? "Disconnected"} - Click to retry`} + </Text> + </HoverCard.Content> + </HoverCard.Root> + ); +}; + +export default ConnectionStatusIndicator; diff --git a/refact-agent/gui/src/components/ConnectionStatus/index.ts b/refact-agent/gui/src/components/ConnectionStatus/index.ts new file mode 100644 index 000000000..f2d23fb1a --- /dev/null +++ b/refact-agent/gui/src/components/ConnectionStatus/index.ts @@ -0,0 +1 @@ +export { ConnectionStatusIndicator } from "./ConnectionStatusIndicator"; diff --git a/refact-agent/gui/src/components/Dropzone/Dropzone.module.css b/refact-agent/gui/src/components/Dropzone/Dropzone.module.css new file mode 100644 index 000000000..720c80d8d --- /dev/null +++ b/refact-agent/gui/src/components/Dropzone/Dropzone.module.css @@ -0,0 +1,4 @@ +/* Use shared iconButton */ +.iconButton { + composes: iconButton from "../shared/iconButton.module.css"; +} diff --git a/refact-agent/gui/src/components/Dropzone/Dropzone.tsx b/refact-agent/gui/src/components/Dropzone/Dropzone.tsx index 44424b9d9..6c3ddef4d 100644 --- a/refact-agent/gui/src/components/Dropzone/Dropzone.tsx +++ b/refact-agent/gui/src/components/Dropzone/Dropzone.tsx @@ -1,6 +1,7 @@ import React, { createContext, useCallback } from "react"; -import { Button, Slot, IconButton, Flex } from "@radix-ui/themes"; +import { Button, Slot, Flex, HoverCard, Text } from "@radix-ui/themes"; import { Cross1Icon, ImageIcon } from "@radix-ui/react-icons"; +import styles from "./Dropzone.module.css"; import { DropzoneInputProps, FileRejection, useDropzone } from "react-dropzone"; import { useAttachedImages } from "../../hooks/useAttachedImages"; import { TruncateLeft } from "../Text"; @@ -20,13 +21,30 @@ export const FileUploadContext = createContext<{ export const DropzoneProvider: React.FC< React.PropsWithChildren<{ asChild?: boolean }> > = ({ asChild, ...props }) => { - const { setError, processAndInsertImages } = useAttachedImages(); + const { setError, processAndInsertImages, processAndInsertTextFiles } = + useAttachedImages(); const { isMultimodalitySupportedForCurrentModel } = useCapsForToolUse(); const onDrop = useCallback( (acceptedFiles: File[], fileRejections: FileRejection[]): void => { - if (!isMultimodalitySupportedForCurrentModel) return; - processAndInsertImages(acceptedFiles); + const imageFiles = acceptedFiles.filter( + (f) => f.type === "image/jpeg" || f.type === "image/png", + ); + const textFiles = acceptedFiles.filter( + (f) => f.type !== "image/jpeg" && f.type !== "image/png", + ); + + if (imageFiles.length > 0) { + if 
(!isMultimodalitySupportedForCurrentModel) { + setError("Current model does not support images"); + } else { + processAndInsertImages(imageFiles); + } + } + + if (textFiles.length > 0) { + processAndInsertTextFiles(textFiles); + } if (fileRejections.length) { const rejectedFileMessage = fileRejections.map((file) => { @@ -38,7 +56,12 @@ export const DropzoneProvider: React.FC< setError(rejectedFileMessage.join("\n")); } }, - [processAndInsertImages, setError, isMultimodalitySupportedForCurrentModel], + [ + processAndInsertImages, + processAndInsertTextFiles, + setError, + isMultimodalitySupportedForCurrentModel, + ], ); // TODO: disable when chat is busy @@ -46,19 +69,6 @@ export const DropzoneProvider: React.FC< disabled: false, noClick: true, noKeyboard: true, - accept: { - // "image/*": [] - // "image/apng": [], - // "image/avif": [], - // "image/gif": [], - "image/jpeg": [], - "image/png": [], - // "image/svg+xml": [], - // "image/webp": [], - // "image/bmp": [], - // "image/x-icon": [], - // "image/tiff": [] - }, onDrop, }); @@ -104,17 +114,26 @@ export const AttachImagesButton = () => { return ( <> <input {...inputProps} style={{ display: "none" }} /> - <IconButton - variant="ghost" - size="1" - title="Attach images" - disabled={inputProps.disabled} - onClick={(event) => { - attachFileOnClick(event, open); - }} - > - <ImageIcon /> - </IconButton> + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + className={styles.iconButton} + disabled={inputProps.disabled} + onClick={(event) => { + attachFileOnClick(event, open); + }} + aria-label="Attach images" + > + <ImageIcon /> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top"> + <Text as="p" size="2"> + Attach images + </Text> + </HoverCard.Content> + </HoverCard.Root> </> ); }} @@ -129,7 +148,7 @@ export const FileList: React.FC<FileListProps> = ({ attachedFiles }) => { const { images, removeImage } = useAttachedImages(); if (images.length === 0 && attachedFiles.files.length === 0) return null; return ( - <Flex wrap="wrap" gap="1" py="2" data-testid="attached_file_list"> + <Flex wrap="wrap" gap="1" data-testid="attached_file_list"> {images.map((file, index) => { const key = `image-${file.name}-${index}`; return ( @@ -160,6 +179,7 @@ const FileButton: React.FC<{ fileName: string; onClick: () => void }> = ({ }) => { return ( <Button + type="button" variant="soft" radius="full" size="1" diff --git a/refact-agent/gui/src/components/IntegrationsView/Header/IntegrationsHeader.module.css b/refact-agent/gui/src/components/IntegrationsView/Header/IntegrationsHeader.module.css index 47789272d..29e87cf5d 100644 --- a/refact-agent/gui/src/components/IntegrationsView/Header/IntegrationsHeader.module.css +++ b/refact-agent/gui/src/components/IntegrationsView/Header/IntegrationsHeader.module.css @@ -3,7 +3,7 @@ top: 0; left: 0; width: 100%; - z-index: 1000; + z-index: var(--z-fixed, 400); } .IntegrationsHeaderIcon { diff --git a/refact-agent/gui/src/components/IntegrationsView/IntegrationForm/IntegrationForm.module.css b/refact-agent/gui/src/components/IntegrationsView/IntegrationForm/IntegrationForm.module.css index 4d0da47d1..054839bde 100644 --- a/refact-agent/gui/src/components/IntegrationsView/IntegrationForm/IntegrationForm.module.css +++ b/refact-agent/gui/src/components/IntegrationsView/IntegrationForm/IntegrationForm.module.css @@ -1,12 +1,15 @@ .button { cursor: pointer; padding: 0 0.75rem; - transition: all 0.3s ease-in-out; + transition: + background-color 0.15s ease, + color 0.15s ease, + opacity 
0.15s ease; } .disabledButton { cursor: not-allowed; - backdrop-filter: blur(1000px); + backdrop-filter: blur(16px); } .DockerIcon { diff --git a/refact-agent/gui/src/components/IntegrationsView/IntegrationForm/MCPLogs.tsx b/refact-agent/gui/src/components/IntegrationsView/IntegrationForm/MCPLogs.tsx index 8141d8565..189ad5380 100644 --- a/refact-agent/gui/src/components/IntegrationsView/IntegrationForm/MCPLogs.tsx +++ b/refact-agent/gui/src/components/IntegrationsView/IntegrationForm/MCPLogs.tsx @@ -2,7 +2,7 @@ import React from "react"; import { useGetMCPLogs } from "./useGetMCPLogs"; import { ScrollArea } from "../../ScrollArea"; import { Box, Flex, Heading, Text } from "@radix-ui/themes"; -import { MarkdownCodeBlock } from "../../Markdown/CodeBlock"; +import { ShikiCodeBlock } from "../../Markdown/ShikiCodeBlock"; type MCPLogsProps = { integrationPath: string; @@ -41,16 +41,15 @@ export const MCPLogs: React.FC<MCPLogsProps> = ({ </Text> <ScrollArea scrollbars="horizontal" style={{ width: "100%" }} asChild> <Box maxHeight="250px"> - <MarkdownCodeBlock + <ShikiCodeBlock className="language-bash" - startingLineNumber={1} showLineNumbers={false} preOptions={{ noMargin: true, }} > {formattedData} - </MarkdownCodeBlock> + </ShikiCodeBlock> </Box> </ScrollArea> </Flex> diff --git a/refact-agent/gui/src/components/Loading/Loading.module.css b/refact-agent/gui/src/components/Loading/Loading.module.css new file mode 100644 index 000000000..2a294e3ac --- /dev/null +++ b/refact-agent/gui/src/components/Loading/Loading.module.css @@ -0,0 +1,70 @@ +.container { + animation: fadeIn 0.15s ease-out; + padding: var(--space-2) 0; + width: 100%; + align-items: center; +} + +@keyframes fadeIn { + from { + opacity: 0; + } + to { + opacity: 1; + } +} + +.dot { + width: 8px; + height: 8px; + background-color: var(--accent-9); + border-radius: 50%; + animation: bounce 1.4s ease-in-out infinite; +} + +.dot:nth-child(1) { + animation-delay: 0s; +} + +.dot:nth-child(2) { + animation-delay: 0.2s; +} + +.dot:nth-child(3) { + animation-delay: 0.4s; +} + +@keyframes bounce { + 0%, + 80%, + 100% { + transform: scale(0.6); + opacity: 0.5; + } + 40% { + transform: scale(1); + opacity: 1; + } +} + +.skeletonLine { + height: 10px; + background: linear-gradient( + 90deg, + var(--gray-a3) 0%, + var(--gray-a5) 50%, + var(--gray-a3) 100% + ); + background-size: 200% 100%; + border-radius: var(--radius-2); + animation: shimmer 1.5s ease-in-out infinite; +} + +@keyframes shimmer { + 0% { + background-position: 200% 0; + } + 100% { + background-position: -200% 0; + } +} diff --git a/refact-agent/gui/src/components/Loading/Loading.tsx b/refact-agent/gui/src/components/Loading/Loading.tsx new file mode 100644 index 000000000..89c500148 --- /dev/null +++ b/refact-agent/gui/src/components/Loading/Loading.tsx @@ -0,0 +1,19 @@ +import React from "react"; +import { Flex, Box } from "@radix-ui/themes"; +import styles from "./Loading.module.css"; + +export const Loading: React.FC = () => { + return ( + <Flex direction="column" gap="2" className={styles.container}> + <Flex gap="2" align="center"> + <Box className={styles.dot} /> + <Box className={styles.dot} /> + <Box className={styles.dot} /> + </Flex> + <Box className={styles.skeletonLine} style={{ width: "80%" }} /> + <Box className={styles.skeletonLine} style={{ width: "60%" }} /> + </Flex> + ); +}; + +Loading.displayName = "Loading"; diff --git a/refact-agent/gui/src/components/Loading/index.ts b/refact-agent/gui/src/components/Loading/index.ts new file mode 100644 index 
000000000..0de49db4c --- /dev/null +++ b/refact-agent/gui/src/components/Loading/index.ts @@ -0,0 +1 @@ +export { Loading } from "./Loading"; diff --git a/refact-agent/gui/src/components/Markdown/CodeBlock.tsx b/refact-agent/gui/src/components/Markdown/CodeBlock.tsx deleted file mode 100644 index 214fd15dd..000000000 --- a/refact-agent/gui/src/components/Markdown/CodeBlock.tsx +++ /dev/null @@ -1,109 +0,0 @@ -import React, { CSSProperties } from "react"; -import SyntaxHighlighter, { - type SyntaxHighlighterProps, -} from "react-syntax-highlighter"; -import { Code, CodeProps, Text } from "@radix-ui/themes"; -import classNames from "classnames"; -import { PreTag, PreTagProps } from "./Pre"; -// import "./highlightjs.css"; -import styles from "./Markdown.module.css"; -import type { Element } from "hast"; -import hljsStyle from "react-syntax-highlighter/dist/esm/styles/hljs/agate"; -import { trimIndent } from "../../utils"; - -export type MarkdownControls = { - onCopyClick: (str: string) => void; -}; - -export type MarkdownCodeBlockProps = React.JSX.IntrinsicElements["code"] & { - node?: Element | undefined; - style?: Record<string, CSSProperties> | SyntaxHighlighterProps["style"]; - wrap?: boolean; - preOptions?: { - noMargin?: boolean; - widthMaxContent?: boolean; - }; - color?: CodeProps["color"]; -} & Pick< - SyntaxHighlighterProps, - "showLineNumbers" | "startingLineNumber" | "useInlineStyles" - > & - Partial<MarkdownControls>; - -const _MarkdownCodeBlock: React.FC<MarkdownCodeBlockProps> = ({ - children, - className, - style = hljsStyle, - onCopyClick, - wrap = false, - preOptions = { widthMaxContent: false, noMargin: false }, - color = undefined, - useInlineStyles, - showLineNumbers = false, -}) => { - const codeRef = React.useRef<HTMLElement | null>(null); - const match = /language-(\w+)/.exec(className ?? ""); - const textWithOutTrailingNewLine = - children === undefined ? undefined : String(children).replace(/\n$/, ""); - const textWithOutIndent = trimIndent(textWithOutTrailingNewLine); - - const preTagProps: PreTagProps = - onCopyClick && textWithOutIndent - ? { - onCopyClick: () => { - if (codeRef.current?.textContent) { - onCopyClick(codeRef.current.textContent); - } - }, - } - : {}; - - if (match ?? String(children).includes("\n")) { - const language: string = match && match.length > 0 ? match[1] : "text"; - return ( - <Text size="2"> - <SyntaxHighlighter - style={style} - className={className} - PreTag={(props) => ( - <PreTag - {...props} - className={classNames({ - [styles.pre_width_max_content]: preOptions.widthMaxContent, - [styles.code_no_margin]: preOptions.noMargin, - })} - {...preTagProps} - /> - )} - CodeTag={(props) => ( - <Code - {...props} - className={classNames( - styles.code, - styles.code_block, - wrap && styles.code_wrap, - )} - ref={codeRef} - /> - )} - showLineNumbers={showLineNumbers} - language={language} - useInlineStyles={useInlineStyles} - > - {textWithOutIndent ? 
textWithOutIndent : "No content"} - </SyntaxHighlighter> - </Text> - ); - } - - return ( - <Code - className={classNames(styles.code, styles.code_inline, className)} - color={color} - > - {children} - </Code> - ); -}; - -export const MarkdownCodeBlock = React.memo(_MarkdownCodeBlock); diff --git a/refact-agent/gui/src/components/Markdown/Markdown.module.css b/refact-agent/gui/src/components/Markdown/Markdown.module.css index 6d14b0387..62c6eab77 100644 --- a/refact-agent/gui/src/components/Markdown/Markdown.module.css +++ b/refact-agent/gui/src/components/Markdown/Markdown.module.css @@ -1,9 +1,21 @@ .relative { position: relative; } + .markdown { position: relative; max-width: 100%; + font-family: + system-ui, + -apple-system, + BlinkMacSystemFont, + "Segoe UI", + Roboto, + "Helvetica Neue", + Arial, + sans-serif; + line-height: 1.6; + white-space: normal; } .floatButton { @@ -14,6 +26,9 @@ .code { display: inline-block; max-width: 100%; + font-family: "SF Mono", "Fira Code", "JetBrains Mono", Consolas, + "Liberation Mono", Menlo, monospace; + font-size: 0.9em; } .code_no_margin { @@ -28,11 +43,18 @@ word-break: break-word; word-wrap: break-word; white-space: break-spaces; + background-color: var(--gray-a3); + padding: 2px 6px; + border-radius: 4px; + font-size: 0.875em; } .code_block { + composes: scrollbarThin from "../shared/scrollbar.module.css"; color: inherit; background: none; + display: block; + overflow-x: auto; } .code_wrap { @@ -47,8 +69,100 @@ ); } +.shiki_wrapper { + display: block; + margin: var(--space-3) 0; +} + +.shiki_pre { + composes: scrollbarThin from "../shared/scrollbar.module.css"; + position: relative; + margin: 0; + padding: var(--space-3); + border-radius: var(--radius-2); + background-color: var(--gray-a2) !important; + overflow-x: auto; +} + +/* Copy button - hidden by default, shown on hover or focus */ +.shiki_pre .copy_button { + position: absolute; + top: var(--space-2); + right: var(--space-2); + opacity: 0; + pointer-events: none; + transition: opacity 0.15s ease; + z-index: 1; +} + +.shiki_pre:hover .copy_button, +.shiki_pre:focus-within .copy_button { + opacity: 1; + pointer-events: auto; +} + +.shiki_code { + display: flex; + flex-direction: row; + gap: var(--space-3); +} + +.shiki_code code { + flex: 1; + font-family: "SF Mono", "Fira Code", "JetBrains Mono", Consolas, + "Liberation Mono", Menlo, monospace; + font-size: var(--font-size-1); + line-height: 1.4; + white-space: normal; +} + +.shiki_code :global(.line) { + display: block; + white-space: pre; + line-height: 1.4; +} + +/* Empty lines need content to maintain height */ +.shiki_code :global(.line:empty)::before { + content: " "; +} + +/* Wrap mode override */ +.code_wrap :global(.line) { + white-space: pre-wrap; +} + +.line_numbers { + display: flex; + flex-direction: column; + text-align: right; + user-select: none; + color: var(--gray-9); + font-size: var(--font-size-1); + line-height: 1.4; + padding-right: var(--space-2); + border-right: 1px solid var(--gray-5); +} + +.line_number { + line-height: 1.4; +} + .list { padding-inline-start: var(--space-6); + margin: var(--space-2) 0; +} + +.list_item { + margin: 0; + padding: 0; + line-height: 1.5; +} + +/* Remove margins from nested elements inside list items */ +.list_item > :global(.rt-Text), +.list_item > :global(p) { + margin: 0 !important; } .maybe_pin { @@ -74,3 +188,33 @@ .maybe_pin:has(.patch_title) + pre pre { margin-top: 0; } + +@keyframes streamFadeIn { + from { + opacity: 0.6; + } + to { + opacity: 1; + } +} + +@keyframes 
cursorBlink { + 0%, + 100% { + opacity: 1; + } + 50% { + opacity: 0; + } +} + +.streaming_text { + animation: streamFadeIn 0.15s ease-out; +} + +.streaming_cursor::after { + content: "▋"; + animation: cursorBlink 1s step-end infinite; + color: var(--accent-9); + margin-left: 2px; +} diff --git a/refact-agent/gui/src/components/Markdown/Markdown.tsx b/refact-agent/gui/src/components/Markdown/Markdown.tsx index 2c338a07a..c20424237 100644 --- a/refact-agent/gui/src/components/Markdown/Markdown.tsx +++ b/refact-agent/gui/src/components/Markdown/Markdown.tsx @@ -2,13 +2,12 @@ import React, { Key, useMemo } from "react"; import ReactMarkdown, { Components } from "react-markdown"; import remarkBreaks from "remark-breaks"; import classNames from "classnames"; -// import "./highlightjs.css"; import styles from "./Markdown.module.css"; import { - MarkdownCodeBlock, - type MarkdownCodeBlockProps, + ShikiCodeBlock, + type ShikiCodeBlockProps, type MarkdownControls, -} from "./CodeBlock"; +} from "./ShikiCodeBlock"; import { Text, Heading, @@ -25,23 +24,21 @@ import rehypeKatex from "rehype-katex"; import remarkMath from "remark-math"; import remarkGfm from "remark-gfm"; import "katex/dist/katex.min.css"; +import type { PluggableList } from "unified"; import { useLinksFromLsp } from "../../hooks"; +const REMARK_PLUGINS: PluggableList = [remarkBreaks, remarkMath, remarkGfm]; +const REHYPE_PLUGINS: PluggableList = [rehypeKatex]; + import { ChatLinkButton } from "../ChatLinks"; import { extractLinkFromPuzzle } from "../../utils/extractLinkFromPuzzle"; +import { useInternalLinkHandler } from "../../contexts/internalLinkUtils"; export type MarkdownProps = Pick< React.ComponentProps<typeof ReactMarkdown>, "children" | "allowedElements" | "unwrapDisallowed" > & - Pick< - MarkdownCodeBlockProps, - | "startingLineNumber" - | "showLineNumbers" - | "useInlineStyles" - | "style" - | "color" - > & { + Pick<ShikiCodeBlockProps, "showLineNumbers" | "color"> & { canHaveInteractiveElements?: boolean; wrap?: boolean; } & Partial<MarkdownControls>; @@ -74,7 +71,7 @@ const MaybeInteractiveElement: React.FC<{ }); return ( - <Text className={styles.maybe_pin} my="2"> + <Text as="div" className={styles.maybe_pin} my="2"> {processed} </Text> ); @@ -86,8 +83,12 @@ const _Markdown: React.FC<MarkdownProps> = ({ unwrapDisallowed, canHaveInteractiveElements, color, - ...rest + showLineNumbers, + wrap, + onCopyClick, }) => { + const internalLinkContext = useInternalLinkHandler(); + const components: Partial<Components> = useMemo(() => { return { ol(props) { @@ -100,8 +101,24 @@ const _Markdown: React.FC<MarkdownProps> = ({ <ul {...props} className={classNames(styles.list, props.className)} /> ); }, + li({ color: _color, ref: _ref, node: _node, ...props }) { + return ( + <li + {...props} + className={classNames(styles.list_item, props.className)} + /> + ); + }, code({ style: _style, color: _color, ...props }) { - return <MarkdownCodeBlock color={color} {...props} {...rest} />; + return ( + <ShikiCodeBlock + color={color} + showLineNumbers={showLineNumbers} + wrap={wrap} + onCopyClick={onCopyClick} + {...props} + /> + ); }, p({ color: _color, ref: _ref, node: _node, ...props }) { if (canHaveInteractiveElements) { @@ -137,13 +154,36 @@ const _Markdown: React.FC<MarkdownProps> = ({ return <Kbd {...props} />; }, a({ color: _color, ref: _ref, node: _node, ...props }) { - const shouldTargetBeBlank = - props.href && - (props.href.startsWith("http") || props.href.startsWith("https")); + const href = props.href ?? 
""; + const isInternalLink = href.startsWith("refact://"); + const isHttpLink = + href.startsWith("http://") || href.startsWith("https://"); + const isMailtoLink = href.startsWith("mailto:"); + const isSafeProtocol = isInternalLink || isHttpLink || isMailtoLink; + + if (!isSafeProtocol && href.includes(":")) { + return <span>{props.children}</span>; + } + + if (isInternalLink) { + return ( + <Link + {...props} + onClick={(e: React.MouseEvent) => { + if (internalLinkContext?.handleInternalLink(href)) { + e.preventDefault(); + } + }} + style={{ cursor: "pointer" }} + /> + ); + } + return ( <Link {...props} - target={shouldTargetBeBlank ? "_blank" : undefined} + target={isHttpLink ? "_blank" : undefined} + rel={isHttpLink ? "noopener noreferrer" : undefined} /> ); }, @@ -178,12 +218,19 @@ const _Markdown: React.FC<MarkdownProps> = ({ return <Table.Cell {...props} />; }, }; - }, [rest, canHaveInteractiveElements, color]); + }, [ + canHaveInteractiveElements, + color, + internalLinkContext, + showLineNumbers, + wrap, + onCopyClick, + ]); return ( <ReactMarkdown className={styles.markdown} - remarkPlugins={[remarkBreaks, remarkMath, remarkGfm]} - rehypePlugins={[rehypeKatex]} + remarkPlugins={REMARK_PLUGINS} + rehypePlugins={REHYPE_PLUGINS} allowedElements={allowedElements} unwrapDisallowed={unwrapDisallowed} components={components} diff --git a/refact-agent/gui/src/components/Markdown/Pre.tsx b/refact-agent/gui/src/components/Markdown/Pre.tsx index 80de21b9b..bef520568 100644 --- a/refact-agent/gui/src/components/Markdown/Pre.tsx +++ b/refact-agent/gui/src/components/Markdown/Pre.tsx @@ -1,56 +1,27 @@ import React from "react"; -import "./highlightjs.css"; -import { Flex, Button } from "@radix-ui/themes"; -import { useConfig } from "../../hooks"; -import { RightButtonGroup, RightButton } from "../Buttons"; +import { IconButton, Tooltip } from "@radix-ui/themes"; +import { CopyIcon } from "@radix-ui/react-icons"; +import styles from "./Markdown.module.css"; const PreTagWithButtons: React.FC< React.PropsWithChildren<{ onCopyClick: () => void; + className?: string; }> -> = ({ children, onCopyClick, ...props }) => { - const config = useConfig(); - +> = ({ children, onCopyClick, className, ...props }) => { return ( - <pre {...props}> - {config.host === "web" ? 
( - <RightButtonGroup - direction="column" - style={{ - position: "static", - minHeight: "var(--space-6)", - }} - > - <Flex - gap="1" - justify="end" - style={{ position: "absolute", right: "0" }} - pr="2" - pt="1" - > - <RightButton onClick={onCopyClick}>Copy</RightButton> - </Flex> - </RightButtonGroup> - ) : ( - <RightButtonGroup - direction="column" - style={{ - position: "static", - minHeight: "var(--space-5)", - }} + <pre className={className} {...props}> + <Tooltip content="Copy"> + <IconButton + size="1" + variant="soft" + className={styles.copy_button} + onClick={onCopyClick} + aria-label="Copy code" > - <Flex - gap="1" - justify="end" - style={{ position: "absolute", right: "0" }} - pr="2" - > - <Button size="1" variant="surface" onClick={onCopyClick}> - ⿻ Copy - </Button> - </Flex> - </RightButtonGroup> - )} + <CopyIcon width={12} height={12} /> + </IconButton> + </Tooltip> {children} </pre> ); @@ -58,13 +29,29 @@ const PreTagWithButtons: React.FC< export type PreTagProps = { onCopyClick?: () => void; + className?: string; }; -export const PreTag: React.FC<React.PropsWithChildren<PreTagProps>> = ( - props, -) => { - if (props.onCopyClick) { - return <PreTagWithButtons {...props} onCopyClick={props.onCopyClick} />; +export const PreTag: React.FC<React.PropsWithChildren<PreTagProps>> = ({ + onCopyClick, + className, + children, + ...rest +}) => { + if (onCopyClick) { + return ( + <PreTagWithButtons + onCopyClick={onCopyClick} + className={className} + {...rest} + > + {children} + </PreTagWithButtons> + ); } - return <pre {...props} />; + return ( + <pre className={className} {...rest}> + {children} + </pre> + ); }; diff --git a/refact-agent/gui/src/components/Markdown/ShikiCodeBlock.tsx b/refact-agent/gui/src/components/Markdown/ShikiCodeBlock.tsx new file mode 100644 index 000000000..a7482cc21 --- /dev/null +++ b/refact-agent/gui/src/components/Markdown/ShikiCodeBlock.tsx @@ -0,0 +1,175 @@ +import React, { CSSProperties, useEffect, useState, useMemo } from "react"; +import { Code, CodeProps, Box } from "@radix-ui/themes"; +import classNames from "classnames"; +import { PreTag, PreTagProps } from "./Pre"; +import styles from "./Markdown.module.css"; +import type { Element } from "hast"; +import { trimIndent } from "../../utils"; +import { useShiki } from "../../hooks/useShiki"; +import { useAppearance } from "../../hooks/useAppearance"; + +export type MarkdownControls = { + onCopyClick: (str: string) => void; +}; + +export type ShikiCodeBlockProps = React.JSX.IntrinsicElements["code"] & { + node?: Element | undefined; + style?: CSSProperties; + wrap?: boolean; + preOptions?: { + noMargin?: boolean; + widthMaxContent?: boolean; + }; + color?: CodeProps["color"]; + showLineNumbers?: boolean; +} & Partial<MarkdownControls>; + +const MAX_HIGHLIGHT_CHARS = 50000; + +const _ShikiCodeBlock: React.FC<ShikiCodeBlockProps> = ({ + children, + className, + onCopyClick, + wrap = false, + preOptions = { widthMaxContent: false, noMargin: false }, + color = undefined, + showLineNumbers = false, +}) => { + const codeRef = React.useRef<HTMLElement | null>(null); + const { highlight, isReady } = useShiki(); + const { appearance } = useAppearance(); + const [highlightedHtml, setHighlightedHtml] = useState<string | null>(null); + + const match = /language-([^\s]+)/.exec(className ?? ""); + const textWithOutTrailingNewLine = + children === undefined ? 
undefined : String(children).replace(/\n$/, ""); + const textWithOutIndent = trimIndent(textWithOutTrailingNewLine); + + const isBlock = match !== null || String(children).includes("\n"); + const language: string = match?.[1] ?? "text"; + const isDark = appearance === "dark"; + + const shouldHighlight = + isBlock && + isReady && + textWithOutIndent && + textWithOutIndent.length <= MAX_HIGHLIGHT_CHARS; + + useEffect(() => { + if (!shouldHighlight || !textWithOutIndent) { + setHighlightedHtml(null); + return; + } + + let cancelled = false; + const timer = setTimeout(() => { + highlight(textWithOutIndent, language, isDark) + .then((result) => { + if (!cancelled) { + setHighlightedHtml(result.html); + } + }) + .catch(() => { + if (!cancelled) { + setHighlightedHtml(null); + } + }); + }, 300); + + return () => { + cancelled = true; + clearTimeout(timer); + }; + }, [shouldHighlight, textWithOutIndent, language, isDark, highlight]); + + const preTagProps: PreTagProps = useMemo(() => { + if (onCopyClick && textWithOutIndent) { + return { + onCopyClick: () => { + if (codeRef.current?.textContent) { + onCopyClick(codeRef.current.textContent); + } + }, + }; + } + return {}; + }, [onCopyClick, textWithOutIndent]); + + if (!isBlock) { + return ( + <Code + variant="ghost" + className={classNames(styles.code, styles.code_inline, className)} + color={color} + > + {children} + </Code> + ); + } + + return ( + <Box className={styles.shiki_wrapper}> + <PreTag + className={classNames({ + [styles.pre_width_max_content]: preOptions.widthMaxContent, + [styles.code_no_margin]: preOptions.noMargin, + [styles.shiki_pre]: true, + })} + {...preTagProps} + > + {highlightedHtml ? ( + <div + className={classNames(styles.shiki_code, wrap && styles.code_wrap)} + > + {showLineNumbers && ( + <div className={styles.line_numbers}> + {textWithOutIndent?.split("\n").map((_, i) => ( + <span key={i} className={styles.line_number}> + {i + 1} + </span> + ))} + </div> + )} + <code + ref={codeRef} + className={classNames(styles.code, styles.code_block)} + dangerouslySetInnerHTML={{ + __html: stripShikiBackground( + extractCodeContent(highlightedHtml), + ), + }} + /> + </div> + ) : ( + <code + className={classNames( + styles.code, + styles.code_block, + wrap && styles.code_wrap, + )} + ref={codeRef} + > + {textWithOutIndent} + </code> + )} + </PreTag> + </Box> + ); +}; + +function extractCodeContent(html: string): string { + const codeMatch = /<code[^>]*>([\s\S]*?)<\/code>/i.exec(html); + if (codeMatch) { + return codeMatch[1]; + } + return html.replace(/<\/?pre[^>]*>/gi, "").replace(/<\/?code[^>]*>/gi, ""); +} + +function stripShikiBackground(html: string): string { + return html + .replace(/style="[^"]*background-color:[^;"]*;?/gi, 'style="') + .replace(/style="[^"]*background:[^;"]*;?/gi, 'style="') + .replace(/style="\s*"/g, ""); +} + +export const ShikiCodeBlock = React.memo(_ShikiCodeBlock); diff --git a/refact-agent/gui/src/components/Markdown/ToolMarkdown.tsx b/refact-agent/gui/src/components/Markdown/ToolMarkdown.tsx index 9f8a8250b..393c465af 100644 --- a/refact-agent/gui/src/components/Markdown/ToolMarkdown.tsx +++ b/refact-agent/gui/src/components/Markdown/ToolMarkdown.tsx @@ -4,7 +4,7 @@ import remarkBreaks from "remark-breaks"; import remarkGfm from "remark-gfm"; import remarkMath from "remark-math"; import rehypeKatex from "rehype-katex"; -import { MarkdownCodeBlock, type MarkdownCodeBlockProps } from "./CodeBlock"; +import { ShikiCodeBlock, type ShikiCodeBlockProps } from "./ShikiCodeBlock"; import toolStyles from 
"./ToolMarkdown.module.css"; import "katex/dist/katex.min.css"; @@ -12,7 +12,7 @@ export type ToolMarkdownProps = Pick< React.ComponentProps<typeof ReactMarkdown>, "children" | "allowedElements" | "unwrapDisallowed" > & - Pick<MarkdownCodeBlockProps, "style" | "color">; + Pick<ShikiCodeBlockProps, "color">; /** * ToolMarkdown - A specialized markdown renderer for tool outputs @@ -27,7 +27,6 @@ export const ToolMarkdown: React.FC<ToolMarkdownProps> = ({ children, allowedElements, unwrapDisallowed, - style, color, }) => { const components: Partial<Components> = useMemo(() => { @@ -68,9 +67,8 @@ export const ToolMarkdown: React.FC<ToolMarkdownProps> = ({ return <li className={toolStyles.listItem} {...props} />; }, - // Code blocks - use the same style as tool output code({ style: _style, color: _color, ...props }) { - return <MarkdownCodeBlock color={color} style={style} {...props} />; + return <ShikiCodeBlock color={color} {...props} />; }, // Inline elements @@ -90,14 +88,22 @@ export const ToolMarkdown: React.FC<ToolMarkdownProps> = ({ return <em {...props} />; }, a({ color: _color, ref: _ref, node: _node, ...props }) { - const shouldTargetBeBlank = - props.href && - (props.href.startsWith("http") || props.href.startsWith("https")); + const href = props.href ?? ""; + const isHttpLink = + href.startsWith("http://") || href.startsWith("https://"); + const isMailtoLink = href.startsWith("mailto:"); + const isSafeProtocol = isHttpLink || isMailtoLink; + + if (!isSafeProtocol && href.includes(":")) { + return <span>{props.children}</span>; + } + return ( <a className={toolStyles.link} {...props} - target={shouldTargetBeBlank ? "_blank" : undefined} + target={isHttpLink ? "_blank" : undefined} + rel={isHttpLink ? "noopener noreferrer" : undefined} /> ); }, @@ -122,7 +128,7 @@ export const ToolMarkdown: React.FC<ToolMarkdownProps> = ({ return <td className={toolStyles.td} {...props} />; }, }; - }, [style, color]); + }, [color]); return ( <ReactMarkdown diff --git a/refact-agent/gui/src/components/Markdown/highlightjs.css b/refact-agent/gui/src/components/Markdown/highlightjs.css deleted file mode 100644 index 601a4218a..000000000 --- a/refact-agent/gui/src/components/Markdown/highlightjs.css +++ /dev/null @@ -1,103 +0,0 @@ -html.light { - /* --hlbg: #ffffff; */ - --hlbg: rgba(0, 0, 0, 0.1); - --hlcolor1: #000000; - --hlcolor2: #000000; - --hlcolor3: #000080; - --hlcolor4: #800080; - --hlcolor5: #808000; - --hlcolor6: #800000; - --hlcolor7: #0055af; - --hlcolor8: #008000; - --hlcolor9: #008000; -} - -html.dark { - /* --hlbg: #000000; */ - --hlbg: rgba(255, 255, 255, 0.1); - --hlcolor1: #aaaaaa; - --hlcolor2: #a8a8a2; - --hlcolor3: #ff55ff; - --hlcolor4: #aaaaff; - --hlcolor5: #ffff55; - --hlcolor6: #ff5555; - --hlcolor7: #8888ff; - --hlcolor8: #ff55ff; - --hlcolor9: #55ffff; -} - -.hljs { - display: block; - overflow-x: auto; - padding: 0.5em; - background: var(--hlbg); -} - -.hljs, -.hljs-subst, -.hljs-tag, -.hljs-title { - color: var(--hlcolor1); -} - -.hljs-strong, -.hljs-emphasis { - color: var(--hlcolor2); -} - -.hljs-bullet, -.hljs-quote, -.hljs-number, -.hljs-regexp, -.hljs-literal { - color: var(--hlcolor3); -} - -.hljs-code .hljs-selector-class { - color: var(--hlcolor4); -} - -.hljs-emphasis, -.hljs-stronge, -.hljs-type { - font-style: italic; -} - -.hljs-keyword, -.hljs-selector-tag, -.hljs-function, -.hljs-section, -.hljs-symbol, -.hljs-name { - color: var(--hlcolor5); -} - -.hljs-attribute { - color: var(--hlcolor6); -} - -.hljs-variable, -.hljs-params, -.hljs-class .hljs-title { - 
color: var(--hlcolor7); -} - -.hljs-string, -.hljs-selector-id, -.hljs-selector-attr, -.hljs-selector-pseudo, -.hljs-type, -.hljs-built_in, -.hljs-builtin-name, -.hljs-template-tag, -.hljs-template-variable, -.hljs-addition, -.hljs-link { - color: var(--hlcolor8); -} - -.hljs-comment, -.hljs-meta, -.hljs-deletion { - color: var(--hlcolor9); -} diff --git a/refact-agent/gui/src/components/Markdown/index.tsx b/refact-agent/gui/src/components/Markdown/index.tsx index 58ffe923a..a3a5a96b7 100644 --- a/refact-agent/gui/src/components/Markdown/index.tsx +++ b/refact-agent/gui/src/components/Markdown/index.tsx @@ -2,3 +2,5 @@ export { Markdown } from "./Markdown"; export type { MarkdownProps } from "./Markdown"; export { ToolMarkdown } from "./ToolMarkdown"; export type { ToolMarkdownProps } from "./ToolMarkdown"; +export { ShikiCodeBlock } from "./ShikiCodeBlock"; +export type { ShikiCodeBlockProps, MarkdownControls } from "./ShikiCodeBlock"; diff --git a/refact-agent/gui/src/components/ModelSamplingParams/ModelSamplingParams.module.css b/refact-agent/gui/src/components/ModelSamplingParams/ModelSamplingParams.module.css new file mode 100644 index 000000000..6e2f554e1 --- /dev/null +++ b/refact-agent/gui/src/components/ModelSamplingParams/ModelSamplingParams.module.css @@ -0,0 +1,105 @@ +.container { + display: flex; + flex-direction: column; + gap: var(--space-3); +} + +.sliderRow { + display: flex; + flex-direction: column; + gap: var(--space-1); +} + +.sliderHeader { + display: flex; + justify-content: space-between; + align-items: center; +} + +.sliderTrack { + display: flex; + align-items: center; + gap: var(--space-2); +} + +.slider { + flex: 1; +} + +.resetButton { + display: flex; + align-items: center; + justify-content: center; + width: 16px; + height: 16px; + padding: 0; + background: transparent; + border: none; + border-radius: var(--radius-1); + cursor: pointer; + color: var(--gray-9); + font-size: 10px; + line-height: 1; + transition: + background-color 0.15s ease, + color 0.15s ease; +} + +.resetButton:hover:not(:disabled) { + background-color: var(--gray-a4); + color: var(--gray-12); +} + +.resetButton:disabled { + opacity: 0.5; + cursor: default; +} + +.effortRow { + display: flex; + align-items: center; + justify-content: space-between; + gap: var(--space-2); +} + +.effortButtons { + display: flex; + gap: var(--space-1); +} + +.effortButton { + padding: var(--space-1) var(--space-2); + background: var(--gray-a3); + border: 1px solid var(--gray-a6); + border-radius: var(--radius-2); + cursor: pointer; + color: var(--gray-11); + transition: all 0.15s ease; +} + +.effortButton:hover:not(:disabled) { + background: var(--gray-a4); + border-color: var(--gray-a8); +} + +.effortButton:disabled { + opacity: 0.5; + cursor: default; +} + +.effortButtonActive { + background: var(--accent-a4); + border-color: var(--accent-a8); + color: var(--accent-11); +} + +.effortButtonActive:hover:not(:disabled) { + background: var(--accent-a5); + border-color: var(--accent-a9); +} + +.reasoningSection { + display: flex; + flex-direction: column; + gap: var(--space-2); +} diff --git a/refact-agent/gui/src/components/ModelSamplingParams/ModelSamplingParams.tsx b/refact-agent/gui/src/components/ModelSamplingParams/ModelSamplingParams.tsx new file mode 100644 index 000000000..c88326c78 --- /dev/null +++ b/refact-agent/gui/src/components/ModelSamplingParams/ModelSamplingParams.tsx @@ -0,0 +1,240 @@ +import React, { useMemo } from "react"; +import { Flex, Text, Slider, Switch } from "@radix-ui/themes"; 
+import { useGetCapsQuery } from "../../services/refact/caps"; +import styles from "./ModelSamplingParams.module.css"; + +export type SamplingValues = { + temperature?: number; + max_new_tokens?: number; + top_p?: number; + boost_reasoning?: boolean; + reasoning_effort?: string; + thinking_budget?: number; +}; + +type ModelSamplingParamsProps = { + model: string | undefined; + values: SamplingValues; + onChange: <K extends keyof SamplingValues>( + field: K, + value: SamplingValues[K], + ) => void; + disabled?: boolean; + size?: "1" | "2"; +}; + +function formatTokens(tokens: number): string { + if (tokens >= 1000000) { + return `${(tokens / 1000000).toFixed(tokens % 1000000 === 0 ? 0 : 1)}M`; + } + return `${Math.round(tokens / 1000)}K`; +} + +export const ModelSamplingParams: React.FC<ModelSamplingParamsProps> = ({ + model, + values, + onChange, + disabled = false, + size = "1", +}) => { + const { data: capsData } = useGetCapsQuery(undefined); + + const modelDetail = useMemo(() => { + if (!model || !capsData?.chat_models) return null; + const m = capsData.chat_models[model] as + | { + n_ctx?: number; + default_temperature?: number | null; + default_max_tokens?: number | null; + max_output_tokens?: number | null; + reasoning_effort_options?: string[] | null; + supports_thinking_budget?: boolean; + supports_adaptive_thinking_budget?: boolean; + } + | undefined; + return m ?? null; + }, [model, capsData]); + + const defaultTemp = modelDetail?.default_temperature ?? 0.7; + const defaultMaxTokens = modelDetail?.default_max_tokens ?? 4096; + const maxOutputTokens = modelDetail?.max_output_tokens ?? 16384; + const reasoningEffortOptions = modelDetail?.reasoning_effort_options; + const supportsThinkingBudget = modelDetail?.supports_thinking_budget ?? false; + const supportsReasoning = + (reasoningEffortOptions != null && reasoningEffortOptions.length > 0) || + supportsThinkingBudget; + + const hasAnyReasoningConfigured = + (values.boost_reasoning ?? false) || + values.reasoning_effort != null || + values.thinking_budget != null; + + return ( + <div className={styles.container}> + {/* Reasoning */} + {supportsReasoning && ( + <div className={styles.reasoningSection}> + <Flex align="center" justify="between" gap="3"> + <Flex align="center" gap="1"> + <Text size={size}>🧠</Text> + <Text size={size} weight="medium"> + Reasoning + </Text> + </Flex> + <Switch + size="1" + checked={values.boost_reasoning ?? false} + onCheckedChange={(checked) => { + onChange("boost_reasoning", checked || undefined); + if (!checked) { + onChange("reasoning_effort", undefined); + onChange("thinking_budget", undefined); + } + }} + disabled={disabled} + /> + </Flex> + + {values.boost_reasoning && ( + <> + {reasoningEffortOptions != null && + reasoningEffortOptions.length > 0 && ( + <div className={styles.effortRow}> + <Text size={size} color="gray"> + Effort + </Text> + <div className={styles.effortButtons}> + {reasoningEffortOptions.map((level) => ( + <button + key={level} + type="button" + className={`${styles.effortButton} ${ + (values.reasoning_effort ?? "medium") === level + ? styles.effortButtonActive + : "" + }`} + onClick={() => onChange("reasoning_effort", level)} + disabled={disabled} + > + <Text size={size}>{level}</Text> + </button> + ))} + </div> + </div> + )} + + {supportsThinkingBudget && ( + <div className={styles.sliderRow}> + <div className={styles.sliderHeader}> + <Text size={size} color="gray"> + Thinking tokens + </Text> + <Text size={size} weight="medium"> + {values.thinking_budget ?? 
16384} + </Text> + </div> + <div className={styles.sliderTrack}> + <Text size="1" color="gray"> + 1K + </Text> + <Slider + size="1" + min={1024} + max={32768} + step={1024} + value={[values.thinking_budget ?? 16384]} + onValueChange={(v) => onChange("thinking_budget", v[0])} + disabled={disabled} + className={styles.slider} + /> + <Text size="1" color="gray"> + 32K + </Text> + </div> + </div> + )} + </> + )} + </div> + )} + + {/* Temperature */} + <div className={styles.sliderRow}> + <div className={styles.sliderHeader}> + <Text size={size} color="gray"> + Temperature + </Text> + <Flex align="center" gap="2"> + <Text size={size} weight="medium"> + {hasAnyReasoningConfigured + ? "None" + : values.temperature?.toFixed(1) ?? + `${defaultTemp.toFixed(1)} (default)`} + </Text> + {values.temperature != null && ( + <button + type="button" + className={styles.resetButton} + onClick={() => onChange("temperature", undefined)} + disabled={disabled || hasAnyReasoningConfigured} + > + ✕ + </button> + )} + </Flex> + </div> + <Slider + size="1" + min={0} + max={2} + step={0.1} + value={[values.temperature ?? defaultTemp]} + onValueChange={(v) => onChange("temperature", v[0])} + disabled={disabled || hasAnyReasoningConfigured} + /> + </div> + + {/* Max Tokens */} + <div className={styles.sliderRow}> + <div className={styles.sliderHeader}> + <Text size={size} color="gray"> + Max tokens + </Text> + <Flex align="center" gap="2"> + <Text size={size} weight="medium"> + {values.max_new_tokens ?? `${defaultMaxTokens} (default)`} + </Text> + {values.max_new_tokens != null && ( + <button + type="button" + className={styles.resetButton} + onClick={() => onChange("max_new_tokens", undefined)} + disabled={disabled} + > + ✕ + </button> + )} + </Flex> + </div> + <div className={styles.sliderTrack}> + <Text size="1" color="gray"> + 1K + </Text> + <Slider + size="1" + min={1024} + max={maxOutputTokens} + step={1024} + value={[values.max_new_tokens ?? 
defaultMaxTokens]} + onValueChange={(v) => onChange("max_new_tokens", v[0])} + disabled={disabled} + className={styles.slider} + /> + <Text size="1" color="gray"> + {formatTokens(maxOutputTokens)} + </Text> + </div> + </div> + </div> + ); +}; diff --git a/refact-agent/gui/src/components/ModelSamplingParams/index.ts b/refact-agent/gui/src/components/ModelSamplingParams/index.ts new file mode 100644 index 000000000..81eb3c83f --- /dev/null +++ b/refact-agent/gui/src/components/ModelSamplingParams/index.ts @@ -0,0 +1,2 @@ +export { ModelSamplingParams } from "./ModelSamplingParams"; +export type { SamplingValues } from "./ModelSamplingParams"; diff --git a/refact-agent/gui/src/components/PageWrapper/PageWrapper.module.css b/refact-agent/gui/src/components/PageWrapper/PageWrapper.module.css index b775896dd..68eaf0b8b 100644 --- a/refact-agent/gui/src/components/PageWrapper/PageWrapper.module.css +++ b/refact-agent/gui/src/components/PageWrapper/PageWrapper.module.css @@ -2,9 +2,9 @@ width: 100%; /* justify-content: center; */ /* Fallback height for JB */ - max-height: 100vh; - max-height: 100dvh; - height: 100%; + flex: 1; + min-height: 0; + overflow: hidden; } @media print { .PageWrapper { diff --git a/refact-agent/gui/src/components/PageWrapper/PageWrapper.tsx b/refact-agent/gui/src/components/PageWrapper/PageWrapper.tsx index 13d78ea74..793f4e20e 100644 --- a/refact-agent/gui/src/components/PageWrapper/PageWrapper.tsx +++ b/refact-agent/gui/src/components/PageWrapper/PageWrapper.tsx @@ -9,6 +9,7 @@ type PageWrapperProps = { host: Config["host"]; className?: string; style?: React.CSSProperties; + noPadding?: boolean; }; export const PageWrapper: React.FC<PageWrapperProps> = ({ @@ -16,16 +17,17 @@ export const PageWrapper: React.FC<PageWrapperProps> = ({ className, host, style, + noPadding, }) => { const xPadding = useMemo(() => { - if (host === "web") return { initial: "8", xl: "9" }; + if (host === "web") return { initial: "4", xl: "6" }; return { initial: "2", xs: "2", - sm: "4", - md: "8", - lg: "8", - xl: "9", + sm: "3", + md: "4", + lg: "5", + xl: "6", }; }, [host]); @@ -38,8 +40,8 @@ export const PageWrapper: React.FC<PageWrapperProps> = ({ direction="column" justify="between" flexGrow="1" - py={yPadding} - px={xPadding} + py={noPadding ? "0" : yPadding} + px={noPadding ? 
"2" : xPadding} className={classNames(styles.PageWrapper, className)} style={style} > diff --git a/refact-agent/gui/src/components/Reveal/Reveal.tsx b/refact-agent/gui/src/components/Reveal/Reveal.tsx index 4689da4ef..3ce51cdc4 100644 --- a/refact-agent/gui/src/components/Reveal/Reveal.tsx +++ b/refact-agent/gui/src/components/Reveal/Reveal.tsx @@ -1,5 +1,6 @@ -import React, { useCallback } from "react"; +import React, { useCallback, useEffect, useState } from "react"; import { Box, Button, Flex } from "@radix-ui/themes"; +import { useCollapsibleStore } from "../ChatContent/useStoredOpen"; import styles from "./reveal.module.css"; import classNames from "classnames"; @@ -8,6 +9,7 @@ export type RevealProps = { defaultOpen: boolean; isRevealingCode?: boolean; onClose?: () => void; + storeKey?: string; }; const RevealButton: React.FC<{ @@ -41,8 +43,20 @@ export const Reveal: React.FC<RevealProps> = ({ defaultOpen, isRevealingCode = false, onClose, + storeKey, }) => { - const [open, setOpen] = React.useState(defaultOpen); + const store = useCollapsibleStore(); + const [open, setOpen] = useState(() => { + if (storeKey && store) { + const stored = store.get(storeKey); + if (stored !== undefined) return stored; + } + return defaultOpen; + }); + + useEffect(() => { + if (storeKey && store) store.set(storeKey, open); + }, [storeKey, store, open]); const handleClick = useCallback(() => { if (defaultOpen) return; diff --git a/refact-agent/gui/src/components/ScrollArea/ScrollArea.module.css b/refact-agent/gui/src/components/ScrollArea/ScrollArea.module.css index 845de6d72..3e917339a 100644 --- a/refact-agent/gui/src/components/ScrollArea/ScrollArea.module.css +++ b/refact-agent/gui/src/components/ScrollArea/ScrollArea.module.css @@ -17,3 +17,31 @@ .full_height :global(.rt-ScrollAreaViewport > *) { height: 100%; } + +.vertical :global(.rt-ScrollAreaScrollbar[data-orientation="vertical"]) { + width: 5px; +} + +/* Hide the scrollbar track by default; show on hover (matches chat UX). 
*/ +.vertical :global(.rt-ScrollAreaScrollbar[data-orientation="vertical"]) { + opacity: 0; + transition: opacity 150ms ease-out; +} + +.vertical:hover :global(.rt-ScrollAreaScrollbar[data-orientation="vertical"]) { + opacity: 1; +} + +.vertical :global(.rt-ScrollAreaThumb) { + background-color: var(--gray-a5); + border-radius: 2px; + transition: background-color 150ms ease-out; +} + +.vertical :global(.rt-ScrollAreaScrollbar:hover .rt-ScrollAreaThumb) { + background-color: var(--gray-a6); +} + +.vertical :global(.rt-ScrollAreaThumb:active) { + background-color: var(--gray-a7); +} diff --git a/refact-agent/gui/src/components/ScrollArea/ScrollToBottomButton.tsx b/refact-agent/gui/src/components/ScrollArea/ScrollToBottomButton.tsx index c06226694..803c56fc2 100644 --- a/refact-agent/gui/src/components/ScrollArea/ScrollToBottomButton.tsx +++ b/refact-agent/gui/src/components/ScrollArea/ScrollToBottomButton.tsx @@ -1,5 +1,5 @@ import { ArrowDownIcon } from "@radix-ui/react-icons"; -import { IconButton } from "@radix-ui/themes"; +import { Container, Flex, IconButton } from "@radix-ui/themes"; type ScrollToBottomButtonProps = { onClick: () => void; @@ -9,19 +9,29 @@ export const ScrollToBottomButton = ({ onClick, }: ScrollToBottomButtonProps) => { return ( - <IconButton - title="Follow stream" + <Container style={{ position: "absolute", - width: 35, - height: 35, bottom: 15, - right: 15, - zIndex: 1, + left: 0, + right: 0, + pointerEvents: "none", }} - onClick={onClick} > - <ArrowDownIcon width={21} height={21} /> - </IconButton> + <Flex justify="end" pr="4"> + <IconButton + title="Follow stream" + style={{ + width: 35, + height: 35, + zIndex: 1, + pointerEvents: "auto", + }} + onClick={onClick} + > + <ArrowDownIcon width={21} height={21} /> + </IconButton> + </Flex> + </Container> ); }; diff --git a/refact-agent/gui/src/components/Select/Select.tsx b/refact-agent/gui/src/components/Select/Select.tsx index 74cf7b4ea..5fee06ec3 100644 --- a/refact-agent/gui/src/components/Select/Select.tsx +++ b/refact-agent/gui/src/components/Select/Select.tsx @@ -82,7 +82,9 @@ export const Select: React.FC<SelectProps> = ({ !isOpen ? 
( <HoverCard.Root openDelay={1000}> <HoverCard.Trigger> - <Trigger /> + <span> + <Trigger /> + </span> </HoverCard.Trigger> <HoverCard.Content size="1" side="top"> {maybeSelectedOption.tooltip} diff --git a/refact-agent/gui/src/components/Select/select.module.css b/refact-agent/gui/src/components/Select/select.module.css index d6201adb1..84a15496b 100644 --- a/refact-agent/gui/src/components/Select/select.module.css +++ b/refact-agent/gui/src/components/Select/select.module.css @@ -2,12 +2,19 @@ overflow-wrap: anywhere; } +.compactTrigger { + display: inline-flex; + align-items: center; + line-height: 1; + padding-block: 0; +} + .content { /* JB doesn't support dvw yet */ max-width: 50vw; max-width: 80dvw; max-height: 300px; - z-index: 9999; + z-index: var(--z-dropdown, 100); } /* Show only plain text in trigger, hide rich content */ @@ -23,7 +30,7 @@ /* Fix Radix Themes Select - remove inner scrollbars */ :global(.rt-SelectContent) { max-height: 400px; - z-index: 9999; + z-index: var(--z-dropdown, 100); } :global(.rt-SelectContent [data-radix-scroll-area-viewport]) { @@ -38,12 +45,13 @@ position: relative; } -/* Fix checkmark indicator positioning */ +/* Fix checkmark indicator positioning - vertically centered */ :global(.rt-SelectItem .rt-SelectItemIndicator), :global(.rt-SelectItem [data-state]) { position: absolute; left: 8px; - top: 8px; + top: 50%; + transform: translateY(-50%); } :global(.rt-SelectViewport) { diff --git a/refact-agent/gui/src/components/Sidebar/GroupTree/ConfirmGroupSelection/ConfirmGroupSelection.module.css b/refact-agent/gui/src/components/Sidebar/GroupTree/ConfirmGroupSelection/ConfirmGroupSelection.module.css index 014f7d9e1..c79bd3b5c 100644 --- a/refact-agent/gui/src/components/Sidebar/GroupTree/ConfirmGroupSelection/ConfirmGroupSelection.module.css +++ b/refact-agent/gui/src/components/Sidebar/GroupTree/ConfirmGroupSelection/ConfirmGroupSelection.module.css @@ -7,6 +7,9 @@ margin-right: auto; /* background: var(--color-panel-translucent, #181a20); */ min-height: fit-content; + /* Force GPU compositing to fix JCEF repaint issues in JetBrains IDEs */ + transform: translateZ(0); + will-change: transform; } .groupName { diff --git a/refact-agent/gui/src/components/Sidebar/GroupTree/CustomTreeNode.module.css b/refact-agent/gui/src/components/Sidebar/GroupTree/CustomTreeNode.module.css index 2d4821608..066b2fd86 100644 --- a/refact-agent/gui/src/components/Sidebar/GroupTree/CustomTreeNode.module.css +++ b/refact-agent/gui/src/components/Sidebar/GroupTree/CustomTreeNode.module.css @@ -1,11 +1,12 @@ .treeNode { --left-padding: 12px; - border-radius: 4px; + border-radius: var(--radius-2); height: 100%; width: calc(100% - var(--left-padding)); box-sizing: border-box; cursor: pointer; - /* padding-left: 12px !important; */ margin-left: var(--left-padding); - transition: all 0.2s ease; + transition: + background-color 0.15s ease, + color 0.15s ease; } diff --git a/refact-agent/gui/src/components/Sidebar/GroupTree/GroupTree.module.css b/refact-agent/gui/src/components/Sidebar/GroupTree/GroupTree.module.css index 3d6ebd334..02a47a66b 100644 --- a/refact-agent/gui/src/components/Sidebar/GroupTree/GroupTree.module.css +++ b/refact-agent/gui/src/components/Sidebar/GroupTree/GroupTree.module.css @@ -24,14 +24,13 @@ div > .sidebarTree { outline-offset: -2px; } -/* TODO: still doesn't work properly for last item in the tree */ -/* Animation for collapse/expand - target all possible elements */ +/* Animation for collapse/expand */ .sidebarTree [role="treeitem"], .sidebarTree 
[aria-expanded="true"], -.sidebarTree [aria-expanded="false"], -.sidebarTree div, -.sidebarTree svg { - transition: all 0.2s ease; +.sidebarTree [aria-expanded="false"] { + transition: + background-color 0.15s ease, + color 0.15s ease; } /* Give folder rows a slightly different appearance */ diff --git a/refact-agent/gui/src/components/Sidebar/Sidebar.module.css b/refact-agent/gui/src/components/Sidebar/Sidebar.module.css index 8a91baa6c..14f8a9087 100644 --- a/refact-agent/gui/src/components/Sidebar/Sidebar.module.css +++ b/refact-agent/gui/src/components/Sidebar/Sidebar.module.css @@ -24,3 +24,7 @@ .popup_ide { width: calc(100vw - var(--space-2) * 2); } + +.taskItem:hover { + background: var(--gray-4); +} diff --git a/refact-agent/gui/src/components/Sidebar/Sidebar.tsx b/refact-agent/gui/src/components/Sidebar/Sidebar.tsx index 94c9bcf49..28cb639cd 100644 --- a/refact-agent/gui/src/components/Sidebar/Sidebar.tsx +++ b/refact-agent/gui/src/components/Sidebar/Sidebar.tsx @@ -1,10 +1,16 @@ -import React, { useCallback } from "react"; +import React, { useCallback, useRef } from "react"; import { Box, Flex, Spinner } from "@radix-ui/themes"; import { ChatHistory, type ChatHistoryProps } from "../ChatHistory"; -import { useAppSelector, useAppDispatch } from "../../hooks"; +import { ScrollArea } from "../ScrollArea"; +import { + useAppSelector, + useAppDispatch, + useLoadMoreHistory, +} from "../../hooks"; import { ChatHistoryItem, deleteChatById, + updateChatTitleById, } from "../../features/History/historySlice"; import { push } from "../../features/Pages/pagesSlice"; import { restoreChat } from "../../features/Chat/Thread"; @@ -15,6 +21,11 @@ import { getErrorMessage, clearError } from "../../features/Errors/errorsSlice"; import classNames from "classnames"; import { selectHost } from "../../features/Config/configSlice"; import styles from "./Sidebar.module.css"; +import { + useListTasksQuery, + useDeleteTaskMutation, + useUpdateTaskMetaMutation, +} from "../../services/refact/tasks"; export type SidebarProps = { takingNotes: boolean; @@ -30,14 +41,34 @@ export type SidebarProps = { >; export const Sidebar: React.FC<SidebarProps> = ({ takingNotes, style }) => { - // TODO: these can be lowered. 
const dispatch = useAppDispatch(); const globalError = useAppSelector(getErrorMessage); const currentHost = useAppSelector(selectHost); - const history = useAppSelector((app) => app.history, { - // TODO: selector issue here + const history = useAppSelector((app) => app.history.chats, { devModeChecks: { stabilityCheck: "never" }, }); + const historyIsLoading = useAppSelector((app) => app.history.isLoading); + const historyLoadError = useAppSelector((app) => app.history.loadError); + const { + data: tasks, + isLoading: tasksIsLoading, + isError: tasksIsError, + } = useListTasksQuery(undefined, { + refetchOnMountOrArgChange: true, + }); + const [deleteTask] = useDeleteTaskMutation(); + const [updateTaskMeta] = useUpdateTaskMetaMutation(); + const { + loadMore: loadMoreHistoryAsync, + hasMore: hasMoreHistory, + isLoading: isLoadingMoreHistory, + error: loadMoreError, + retry: retryLoadMore, + } = useLoadMoreHistory(); + + const loadMoreHistory = useCallback(() => { + void loadMoreHistoryAsync(); + }, [loadMoreHistoryAsync]); const onDeleteHistoryItem = useCallback( (id: string) => dispatch(deleteChatById(id)), @@ -52,21 +83,77 @@ export const Sidebar: React.FC<SidebarProps> = ({ takingNotes, style }) => { [dispatch], ); + const handleTaskClick = useCallback( + (taskId: string) => { + dispatch(push({ name: "task workspace", taskId })); + }, + [dispatch], + ); + + const handleDeleteTask = useCallback( + (taskId: string) => { + void deleteTask(taskId); + }, + [deleteTask], + ); + + const handleRenameTask = useCallback( + (taskId: string, newName: string) => { + void updateTaskMeta({ taskId, name: newName }); + }, + [updateTaskMeta], + ); + + const onRenameChat = useCallback( + (id: string, newTitle: string) => { + dispatch(updateChatTitleById({ chatId: id, newTitle })); + }, + [dispatch], + ); + + const scrollAreaRef = useRef<HTMLDivElement>(null); + return ( - <Flex style={style}> + <Flex + style={{ + ...style, + flexDirection: "column", + height: "100%", + overflow: "hidden", + }} + > <FeatureMenu /> - <Flex mt="4"> + <Flex mt="1"> <Box position="absolute" ml="5" mt="2"> <Spinner loading={takingNotes} title="taking notes" /> </Box> </Flex> - <ChatHistory - history={history} - onHistoryItemClick={onHistoryItemClick} - onDeleteHistoryItem={onDeleteHistoryItem} - /> - {/* TODO: duplicated */} + <Box style={{ overflow: "hidden", flex: 1 }}> + <ScrollArea scrollbars="vertical" ref={scrollAreaRef}> + <ChatHistory + history={history} + tasks={tasks} + isLoading={historyIsLoading || tasksIsLoading} + onHistoryItemClick={onHistoryItemClick} + onDeleteHistoryItem={onDeleteHistoryItem} + onRenameHistoryItem={onRenameChat} + onTaskClick={handleTaskClick} + onDeleteTask={handleDeleteTask} + onRenameTask={handleRenameTask} + onLoadMore={loadMoreHistory} + hasMore={hasMoreHistory} + isLoadingMore={isLoadingMoreHistory} + loadMoreError={loadMoreError} + onRetryLoadMore={retryLoadMore} + hasConnectionError={!!historyLoadError || tasksIsError} + compactView={true} + noScroll={true} + scrollContainerRef={scrollAreaRef} + /> + </ScrollArea> + </Box> + {globalError && ( <ErrorCallout mx="0" diff --git a/refact-agent/gui/src/components/StatusDot/StatusDot.module.css b/refact-agent/gui/src/components/StatusDot/StatusDot.module.css new file mode 100644 index 000000000..32898bfbb --- /dev/null +++ b/refact-agent/gui/src/components/StatusDot/StatusDot.module.css @@ -0,0 +1,71 @@ +.dot { + border-radius: 50%; + flex-shrink: 0; +} + +.small { + width: 8px; + height: 8px; +} + +.medium { + width: 10px; + height: 10px; +} + 
+/* Idle - gray dot */ +.idle { + background-color: var(--gray-8); +} + +/* In Progress - pulsating blue */ +.inProgress { + background-color: var(--blue-9); + animation: pulseBlue 1.5s ease-in-out infinite; +} + +@keyframes pulseBlue { + 0% { + opacity: 1; + box-shadow: 0 0 0 0 var(--blue-a7); + } + 50% { + opacity: 0.8; + box-shadow: 0 0 0 4px transparent; + } + 100% { + opacity: 1; + box-shadow: 0 0 0 0 transparent; + } +} + +/* Needs Attention - pulsating yellow/amber */ +.needsAttention { + background-color: var(--amber-9); + animation: pulseYellow 1.5s ease-in-out infinite; +} + +@keyframes pulseYellow { + 0% { + opacity: 1; + box-shadow: 0 0 0 0 var(--amber-a7); + } + 50% { + opacity: 0.85; + box-shadow: 0 0 0 4px transparent; + } + 100% { + opacity: 1; + box-shadow: 0 0 0 0 transparent; + } +} + +/* Error - red dot */ +.error { + background-color: var(--red-9); +} + +/* Completed - green dot */ +.completed { + background-color: var(--green-9); +} diff --git a/refact-agent/gui/src/components/StatusDot/StatusDot.tsx b/refact-agent/gui/src/components/StatusDot/StatusDot.tsx new file mode 100644 index 000000000..60309cf4c --- /dev/null +++ b/refact-agent/gui/src/components/StatusDot/StatusDot.tsx @@ -0,0 +1,58 @@ +import React from "react"; +import { HoverCard, Text } from "@radix-ui/themes"; +import styles from "./StatusDot.module.css"; + +export type StatusDotState = + | "idle" + | "in_progress" + | "needs_attention" + | "error" + | "completed"; + +export interface StatusDotProps { + state: StatusDotState; + size?: "small" | "medium"; + tooltipText?: string; +} + +const STATE_TOOLTIPS: Record<StatusDotState, string> = { + idle: "Idle", + in_progress: "In progress...", + needs_attention: "Needs your attention", + error: "An error occurred", + completed: "Completed", +}; + +const STATE_CLASS_MAP: Record<StatusDotState, string> = { + idle: styles.idle, + in_progress: styles.inProgress, + needs_attention: styles.needsAttention, + error: styles.error, + completed: styles.completed, +}; + +export const StatusDot: React.FC<StatusDotProps> = ({ + state, + size = "small", + tooltipText, +}) => { + const sizeClass = size === "small" ? styles.small : styles.medium; + const stateClass = STATE_CLASS_MAP[state]; + const tooltip = tooltipText ?? 
STATE_TOOLTIPS[state]; + + return ( + <HoverCard.Root openDelay={200} closeDelay={100}> + <HoverCard.Trigger> + <div + className={`${styles.dot} ${sizeClass} ${stateClass}`} + aria-label={tooltip} + /> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top" align="center"> + <Text as="p" size="1"> + {tooltip} + </Text> + </HoverCard.Content> + </HoverCard.Root> + ); +}; diff --git a/refact-agent/gui/src/components/StatusDot/index.ts b/refact-agent/gui/src/components/StatusDot/index.ts new file mode 100644 index 000000000..2f27d1047 --- /dev/null +++ b/refact-agent/gui/src/components/StatusDot/index.ts @@ -0,0 +1,2 @@ +export { StatusDot } from "./StatusDot"; +export type { StatusDotState, StatusDotProps } from "./StatusDot"; diff --git a/refact-agent/gui/src/components/TaskProgressWidget/TaskProgressWidget.module.css b/refact-agent/gui/src/components/TaskProgressWidget/TaskProgressWidget.module.css new file mode 100644 index 000000000..7edbb750a --- /dev/null +++ b/refact-agent/gui/src/components/TaskProgressWidget/TaskProgressWidget.module.css @@ -0,0 +1,79 @@ +.widget { + background: transparent; +} + +.header { + cursor: pointer; + opacity: 0.8; + transition: opacity 0.15s ease; +} + +.header:hover { + opacity: 1; +} + +.taskRow { + padding: var(--space-1) var(--space-2); + border-radius: var(--radius-2); + border-left: 2px solid transparent; + transition: + background-color 0.2s ease, + border-color 0.2s ease, + opacity 0.2s ease; + opacity: 0.8; +} + +.taskRow:hover { + opacity: 1; +} + +.taskRow.active { + background: var(--accent-a3); + border-left-color: var(--accent-9); + opacity: 1; +} + +.activeHint { +} + +.headerIcon { + color: var(--gray-11); + flex-shrink: 0; +} + +.content { + animation: slideDown 0.2s ease-out; + transform-origin: top; +} + +@keyframes slideDown { + from { + opacity: 0; + transform: translateY(-8px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.taskList { + display: flex; + flex-direction: column; + gap: var(--space-2); +} + +.taskRowEnter { + animation: taskFadeIn 0.2s ease-out forwards; +} + +@keyframes taskFadeIn { + from { + opacity: 0; + transform: translateX(-8px); + } + to { + opacity: 0.8; + transform: translateX(0); + } +} diff --git a/refact-agent/gui/src/components/TaskProgressWidget/TaskProgressWidget.tsx b/refact-agent/gui/src/components/TaskProgressWidget/TaskProgressWidget.tsx new file mode 100644 index 000000000..821be37b5 --- /dev/null +++ b/refact-agent/gui/src/components/TaskProgressWidget/TaskProgressWidget.tsx @@ -0,0 +1,195 @@ +import React, { useCallback } from "react"; +import * as Collapsible from "@radix-ui/react-collapsible"; +import { Flex, Text, Box, Separator } from "@radix-ui/themes"; +import classNames from "classnames"; + +import { useAppSelector, useAppDispatch } from "../../hooks"; +import { + selectChatId, + selectCurrentTasks, + selectHasTasks, + selectTasksEverUsed, + selectTaskProgress, + selectTaskWidgetExpanded, + selectIsStreaming, + setTaskWidgetExpanded, +} from "../../features/Chat/Thread"; +import type { TodoItem, TodoStatus } from "../../features/Chat/Thread/types"; +import { Chevron } from "../Collapsible"; + +import { StatusDot, type StatusDotState } from "../StatusDot"; +import { CircularProgress } from "../ChatHistory/CircularProgress"; +import styles from "./TaskProgressWidget.module.css"; + +function getStatusDotState( + status: TodoStatus, + _isStreaming: boolean, +): StatusDotState { + switch (status) { + case "in_progress": + return "in_progress"; // Blue pulsing for 
in-progress tasks + case "completed": + return "completed"; // Green solid for completed + case "failed": + return "error"; // Red for failed + case "pending": + default: + return "idle"; // Gray for pending + } +} + +const STATUS_TOOLTIPS: Record<TodoStatus, string> = { + completed: "Completed", + in_progress: "In progress", + pending: "Pending", + failed: "Failed", +}; + +type StatusIconProps = { + status: TodoStatus; + isStreaming?: boolean; +}; + +const StatusIcon: React.FC<StatusIconProps> = ({ + status, + isStreaming = false, +}) => { + const dotState = getStatusDotState(status, isStreaming); + return ( + <StatusDot + state={dotState} + size="small" + tooltipText={STATUS_TOOLTIPS[status]} + /> + ); +}; + +type TaskRowProps = { + task: TodoItem; + isStreaming: boolean; +}; + +const TaskRow: React.FC<TaskRowProps> = ({ task, isStreaming }) => { + const isActive = task.status === "in_progress"; + + return ( + <Flex + align="center" + gap="2" + className={classNames(styles.taskRow, { [styles.active]: isActive })} + > + <StatusIcon status={task.status} isStreaming={isStreaming && isActive} /> + <Text size="1" style={{ flex: 1 }}> + {task.content} + </Text> + </Flex> + ); +}; + +export const TaskProgressWidget: React.FC = () => { + const dispatch = useAppDispatch(); + const chatId = useAppSelector(selectChatId); + const hasTasks = useAppSelector(selectHasTasks); + const everUsed = useAppSelector(selectTasksEverUsed); + const tasks = useAppSelector(selectCurrentTasks); + const isExpanded = useAppSelector(selectTaskWidgetExpanded); + const isStreaming = useAppSelector(selectIsStreaming); + const { done, total, activeTitle } = useAppSelector(selectTaskProgress); + + const handleOpenChange = useCallback( + (open: boolean) => { + if (chatId) { + dispatch(setTaskWidgetExpanded({ id: chatId, expanded: open })); + } + }, + [dispatch, chatId], + ); + + if (!everUsed) return null; + + return ( + <Box className={styles.widget}> + <Collapsible.Root open={isExpanded} onOpenChange={handleOpenChange}> + <Collapsible.Trigger asChild> + <Flex className={styles.header} align="center" gap="3" px="3" py="2"> + <Flex align="center" gap="2" style={{ flex: 1 }}> + {!isExpanded && hasTasks && ( + <> + <Flex gap="1" align="center"> + {tasks.map((task) => ( + <StatusIcon + key={task.id} + status={task.status} + isStreaming={ + task.status === "in_progress" && isStreaming + } + /> + ))} + </Flex> + + <CircularProgress done={done} total={total} size={14} /> + + {activeTitle && ( + <Text size="1" color="gray" className={styles.activeHint}> + {activeTitle} + </Text> + )} + </> + )} + + {!isExpanded && !hasTasks && ( + <Text size="1" color="gray"> + Tasks cleared + </Text> + )} + + {isExpanded && ( + <Text size="1" weight="medium"> + Task Progress + </Text> + )} + </Flex> + + <Chevron open={isExpanded} /> + </Flex> + </Collapsible.Trigger> + + <Collapsible.Content> + <Flex + direction="column" + gap="2" + px="3" + pb="3" + className={styles.content} + > + {hasTasks ? 
( + <> + <div className={styles.taskList}> + {tasks.map((task, index) => ( + <div + key={task.id} + className={styles.taskRowEnter} + style={{ animationDelay: `${index * 50}ms` }} + > + <TaskRow task={task} isStreaming={isStreaming} /> + </div> + ))} + </div> + <Separator size="4" /> + <Text size="1" color="gray"> + {done}/{total} completed + </Text> + </> + ) : ( + <Text size="1" color="gray"> + No active tasks + </Text> + )} + </Flex> + </Collapsible.Content> + </Collapsible.Root> + </Box> + ); +}; + +export default TaskProgressWidget; diff --git a/refact-agent/gui/src/components/TaskProgressWidget/index.ts b/refact-agent/gui/src/components/TaskProgressWidget/index.ts new file mode 100644 index 000000000..a8f7e6107 --- /dev/null +++ b/refact-agent/gui/src/components/TaskProgressWidget/index.ts @@ -0,0 +1,2 @@ +export { TaskProgressWidget } from "./TaskProgressWidget"; +export { default } from "./TaskProgressWidget"; diff --git a/refact-agent/gui/src/components/TextArea/TextArea.module.css b/refact-agent/gui/src/components/TextArea/TextArea.module.css index 1d255bd83..63aa0b0c5 100644 --- a/refact-agent/gui/src/components/TextArea/TextArea.module.css +++ b/refact-agent/gui/src/components/TextArea/TextArea.module.css @@ -1,16 +1,45 @@ +/* The .textarea class is applied to the RadixTextArea component, + which renders as rt-TextAreaRoot > rt-TextAreaInput. + The className goes to the ROOT element (rt-TextAreaRoot), not the input. */ .textarea { border: none; box-shadow: none; outline: none; + background-color: transparent; /* JB doesn't support dvh */ max-height: 50vh; max-height: 50dvh; } +/* Override Radix variant-surface background */ +.textarea:global(.rt-variant-surface) { + background-color: transparent; + box-shadow: none; +} + +/* Override Radix TextArea inner elements */ +.textarea :global(.rt-TextAreaChrome), +.textarea :global(.rt-TextAreaInput) { + background-color: transparent; +} + .textarea :global(.rt-TextAreaInput) { outline: none; } +.textarea :global(.rt-TextAreaInput[readonly]) { + color: var(--gray-12); + -webkit-text-fill-color: var(--gray-12); + opacity: 1; + background-color: transparent !important; +} + +.textarea:has(:global(.rt-TextAreaInput[readonly])), +.textarea:has(:global(.rt-TextAreaInput[readonly])) :global(*) { + background-color: transparent !important; + background: transparent !important; +} + :global(.rt-TextAreaRoot) > .textarea { background-color: unset; } diff --git a/refact-agent/gui/src/components/TextAreaWithChips/TextAreaWithChips.tsx b/refact-agent/gui/src/components/TextAreaWithChips/TextAreaWithChips.tsx new file mode 100644 index 000000000..ec96a272d --- /dev/null +++ b/refact-agent/gui/src/components/TextAreaWithChips/TextAreaWithChips.tsx @@ -0,0 +1,16 @@ +import React from "react"; +import { TextArea, type TextAreaProps } from "../TextArea/TextArea"; + +type TextAreaWithChipsProps = TextAreaProps & { + host: string; + onOpenFile?: (file: { file_path: string; line?: number }) => Promise<void>; +}; + +export const TextAreaWithChips = React.forwardRef< + HTMLTextAreaElement, + TextAreaWithChipsProps +>(({ host: _host, onOpenFile: _onOpenFile, ...props }, ref) => { + return <TextArea {...props} ref={ref} />; +}); + +TextAreaWithChips.displayName = "TextAreaWithChips"; diff --git a/refact-agent/gui/src/components/TextAreaWithChips/index.ts b/refact-agent/gui/src/components/TextAreaWithChips/index.ts new file mode 100644 index 000000000..d205e8ca4 --- /dev/null +++ b/refact-agent/gui/src/components/TextAreaWithChips/index.ts @@ -0,0 +1 @@ 
+export { TextAreaWithChips } from "./TextAreaWithChips"; diff --git a/refact-agent/gui/src/components/Theme/Theme.module.css b/refact-agent/gui/src/components/Theme/Theme.module.css new file mode 100644 index 000000000..009af67fb --- /dev/null +++ b/refact-agent/gui/src/components/Theme/Theme.module.css @@ -0,0 +1,26 @@ +.themeToggle { + position: fixed; + z-index: var(--z-fixed, 400); + right: 15px; + top: 15px; + display: flex; + align-items: center; + justify-content: center; + width: 28px; + height: 28px; + padding: 0; + border: none; + background: transparent; + color: var(--gray-11); + cursor: pointer; + transition: filter 0.15s ease; +} + +.themeToggle svg { + width: 15px; + height: 15px; +} + +.themeToggle:hover { + filter: brightness(1.5); +} diff --git a/refact-agent/gui/src/components/Theme/Theme.tsx b/refact-agent/gui/src/components/Theme/Theme.tsx index f6f1c2b27..9f92fe604 100644 --- a/refact-agent/gui/src/components/Theme/Theme.tsx +++ b/refact-agent/gui/src/components/Theme/Theme.tsx @@ -1,8 +1,10 @@ import React from "react"; -import { Theme as RadixTheme, IconButton } from "@radix-ui/themes"; +import { Theme as RadixTheme } from "@radix-ui/themes"; import { MoonIcon, SunIcon } from "@radix-ui/react-icons"; import "@radix-ui/themes/styles.css"; import "./theme-config.css"; +import "../shared/tokens.css"; +import styles from "./Theme.module.css"; import { useAppearance, useConfig } from "../../hooks"; export type ThemeProps = { @@ -49,15 +51,15 @@ const ThemeWithDarkMode: React.FC<ThemeProps> = ({ children, ...props }) => { const Icon = isDarkMode ? MoonIcon : SunIcon; return ( <RadixTheme {...props} appearance={isDarkMode ? "dark" : "light"}> - <IconButton - variant="surface" - color="gray" + <button + type="button" + className={styles.themeToggle} title="toggle dark mode" - style={{ position: "fixed", zIndex: 999, right: 15, top: 15 }} onClick={toggle} + aria-label="Toggle dark mode" > <Icon /> - </IconButton> + </button> {/** TODO: remove this in production */} {/** use cmd + c to open and close */} {/* <ThemePanel defaultOpen={false} /> */} diff --git a/refact-agent/gui/src/components/Theme/theme-config.css b/refact-agent/gui/src/components/Theme/theme-config.css index 66038a6f7..1d4c7a867 100644 --- a/refact-agent/gui/src/components/Theme/theme-config.css +++ b/refact-agent/gui/src/components/Theme/theme-config.css @@ -1,17 +1,22 @@ -/** Override radix-themes defaults **/ - .radix-themes { - /* --default-font-family: Space Grotesk, sans-serif; */ + --default-font-family: system-ui, -apple-system, BlinkMacSystemFont, + "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; + --code-font-family: "SF Mono", "Fira Code", "JetBrains Mono", Consolas, + "Liberation Mono", Menlo, monospace; + --default-font-size: 14px; + --default-line-height: 1.5; +} + +:where(.radix-themes) :where(.rt-Code) { + font-family: var(--code-font-family); } -/** Force dark tooltips in dark theme **/ :where(.radix-themes) :where(.rt-TooltipContent) { background-color: #1a1a1a !important; color: #fcfcfc !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; } -/** Light theme tooltips **/ :where(.radix-themes[data-appearance="light"]) :where(.rt-TooltipContent) { background-color: #fcfcfc !important; color: #1a1a1a !important; diff --git a/refact-agent/gui/src/components/Toolbar/Dropdown.tsx b/refact-agent/gui/src/components/Toolbar/Dropdown.tsx index a195a930f..6635e686d 100644 --- a/refact-agent/gui/src/components/Toolbar/Dropdown.tsx +++ 
b/refact-agent/gui/src/components/Toolbar/Dropdown.tsx @@ -1,6 +1,10 @@ import React, { useCallback, useEffect, useMemo } from "react"; -import { selectHost, type Config } from "../../features/Config/configSlice"; -import { useTourRefs } from "../../features/Tour"; +import { + selectHost, + selectAddressURL, + type Config, +} from "../../features/Config/configSlice"; +import { push } from "../../features/Pages/pagesSlice"; import { useGetUser, useLogout, @@ -15,7 +19,6 @@ import { DropdownMenu, Flex, HoverCard, - IconButton, // Select, Text, } from "@radix-ui/themes"; @@ -25,13 +28,13 @@ import { QuestionMarkCircledIcon, GearIcon, } from "@radix-ui/react-icons"; -import { clearHistory } from "../../features/History/historySlice"; +import styles from "./Toolbar.module.css"; + import { PuzzleIcon } from "../../images/PuzzleIcon"; import { Coin } from "../../images"; import { useCoinBallance } from "../../hooks/useCoinBalance"; import { isUserWithLoginMessage } from "../../services/smallcloud/types"; -import { resetActiveGroup, selectActiveGroup } from "../../features/Teams"; -import { popBackTo } from "../../features/Pages/pagesSlice"; + import { useActiveTeamsGroup } from "../../hooks/useActiveTeamsGroup"; export type DropdownNavigationOptions = @@ -39,25 +42,22 @@ export type DropdownNavigationOptions = | "stats" | "settings" | "hot keys" - | "restart tour" | "login page" | "integrations" | "providers" + | "knowledge graph" + | "customization" + | "default models" | ""; type DropdownProps = { handleNavigation: (to: DropdownNavigationOptions) => void; + triggerClassName?: string; + useGhostTrigger?: boolean; }; -function linkForBugReports(host: Config["host"]): string { - switch (host) { - case "vscode": - return "https://github.com/smallcloudai/refact-vscode/issues"; - case "jetbrains": - return "https://github.com/smallcloudai/refact-intellij/issues"; - default: - return "https://github.com/smallcloudai/refact-chat-js/issues"; - } +function linkForBugReports(_host: Config["host"]): string { + return "https://github.com/smallcloudai/refact/issues"; } function linkForAccount(host: Config["host"]): string { @@ -73,15 +73,15 @@ function linkForAccount(host: Config["host"]): string { export const Dropdown: React.FC<DropdownProps> = ({ handleNavigation, + triggerClassName, }: DropdownProps) => { - const refs = useTourRefs(); + const dispatch = useAppDispatch(); const user = useGetUser(); const host = useAppSelector(selectHost); - const activeGroup = useAppSelector(selectActiveGroup); - const dispatch = useAppDispatch(); // TODO: check how much of this is still used. 
// const { maxAgentUsageAmount, currentAgentUsage } = useAgentUsage(); const coinBalance = useCoinBallance(); + const addressURL = useAppSelector(selectAddressURL); const logout = useLogout(); const { startPollingForUser } = useStartPollingForUser(); @@ -91,22 +91,7 @@ export const Dropdown: React.FC<DropdownProps> = ({ const discordUrl = "https://www.smallcloud.ai/discord"; const accountLink = linkForAccount(host); const openUrl = useOpenUrl(); - const { - openCustomizationFile, - openPrivacyFile, - setLoginMessage, - clearActiveTeamsGroupInIDE, - } = useEventsBusForIDE(); - - const handleChatHistoryCleanUp = () => { - dispatch(clearHistory()); - }; - - const handleActiveGroupCleanUp = () => { - clearActiveTeamsGroupInIDE(); - const actions = [resetActiveGroup(), popBackTo({ name: "history" })]; - actions.forEach((action) => dispatch(action)); - }; + const { openPrivacyFile, setLoginMessage } = useEventsBusForIDE(); const handleProUpgradeClick = useCallback(() => { startPollingForUser(); @@ -126,11 +111,24 @@ export const Dropdown: React.FC<DropdownProps> = ({ return ( <DropdownMenu.Root> - <DropdownMenu.Trigger> - <IconButton variant="outline" ref={(x) => refs.setMore(x)}> - <HamburgerMenuIcon /> - </IconButton> - </DropdownMenu.Trigger> + <HoverCard.Root> + <HoverCard.Trigger> + <DropdownMenu.Trigger> + <button + type="button" + className={triggerClassName ?? styles.iconButton} + aria-label="Menu" + > + <HamburgerMenuIcon /> + </button> + </DropdownMenu.Trigger> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="bottom"> + <Text as="p" size="2"> + Menu + </Text> + </HoverCard.Content> + </HoverCard.Root> <DropdownMenu.Content> {user.data && ( @@ -260,10 +258,13 @@ export const Dropdown: React.FC<DropdownProps> = ({ <GearIcon /> Configure Providers </DropdownMenu.Item> + <DropdownMenu.Item onSelect={() => handleNavigation("default models")}> + <GearIcon /> Default Models + </DropdownMenu.Item> + {isKnowledgeFeatureAvailable && ( <DropdownMenu.Item - // TODO: get real URL from cloud inference - onSelect={() => openUrl("https://flexus.team/")} + onSelect={() => handleNavigation("knowledge graph")} > Manage Knowledge </DropdownMenu.Item> @@ -277,12 +278,8 @@ export const Dropdown: React.FC<DropdownProps> = ({ IDE Hotkeys </DropdownMenu.Item> - <DropdownMenu.Item - onSelect={() => { - void openCustomizationFile(); - }} - > - Edit customization.yaml + <DropdownMenu.Item onSelect={() => handleNavigation("customization")}> + Customize Modes & Agents </DropdownMenu.Item> <DropdownMenu.Item @@ -295,10 +292,6 @@ export const Dropdown: React.FC<DropdownProps> = ({ <DropdownMenu.Separator /> - <DropdownMenu.Item onSelect={() => handleNavigation("restart tour")}> - Restart tour - </DropdownMenu.Item> - <DropdownMenu.Item onSelect={(event) => { event.preventDefault(); @@ -316,28 +309,23 @@ export const Dropdown: React.FC<DropdownProps> = ({ Your Stats </DropdownMenu.Item> - <DropdownMenu.Item onSelect={handleChatHistoryCleanUp}> - Clear Chat History - </DropdownMenu.Item> - - {isKnowledgeFeatureAvailable && ( + {addressURL?.trim().toLowerCase() === "refact" && user.data ? 
( <DropdownMenu.Item - onSelect={handleActiveGroupCleanUp} - disabled={activeGroup === null} + onSelect={(event) => { + event.preventDefault(); + logout(); + handleNavigation("login page"); + }} > - Unselect Active Group + Logout </DropdownMenu.Item> - )} - - <DropdownMenu.Item - onSelect={(event) => { - event.preventDefault(); - logout(); - handleNavigation("login page"); - }} - > - Logout - </DropdownMenu.Item> + ) : !user.data ? ( + <DropdownMenu.Item + onSelect={() => dispatch(push({ name: "login page" }))} + > + Login to Refact Cloud + </DropdownMenu.Item> + ) : null} </DropdownMenu.Content> </DropdownMenu.Root> ); diff --git a/refact-agent/gui/src/components/Toolbar/Toolbar.module.css b/refact-agent/gui/src/components/Toolbar/Toolbar.module.css index 2c53d33f0..286667205 100644 --- a/refact-agent/gui/src/components/Toolbar/Toolbar.module.css +++ b/refact-agent/gui/src/components/Toolbar/Toolbar.module.css @@ -1,8 +1,217 @@ -.RenameInput { - --base-button-height: var(--space-6); - outline: none; +.toolbar { + display: flex; + align-items: center; + background: transparent; + height: 36px; + padding: 0 var(--space-2); + gap: 0; +} + +.toolbarSection { + display: flex; + align-items: center; + flex-shrink: 0; + height: 100%; + gap: 2px; +} + +.iconButton { + display: flex; + align-items: center; + justify-content: center; + width: 28px; + height: 28px; + padding: 0; + border: none; + background: transparent; + color: var(--gray-11); + cursor: pointer; + transition: filter 0.15s ease; + flex-shrink: 0; +} + +.iconButton svg { + width: 15px; + height: 15px; +} + +.iconButton:hover:not(:disabled) { + filter: brightness(1.5); +} + +.iconButton:disabled { + opacity: 0.4; + cursor: not-allowed; +} + +.iconButton:focus-visible { + outline: 2px solid var(--accent-8); + outline-offset: -2px; + box-shadow: none; +} + +.homeButton:hover { + filter: brightness(1.2); +} + +.toolbarDivider { + width: 1px; + height: 16px; + background: var(--gray-a5); + margin: 0 var(--space-2); + flex-shrink: 0; +} + +.tabsContainer { + display: flex; + align-items: center; + flex: 1; + min-width: 0; + height: 100%; + overflow-x: auto; + overflow-y: hidden; + scrollbar-width: none; + -ms-overflow-style: none; +} + +.tabsContainer::-webkit-scrollbar { + display: none; +} + +.tabList { + display: flex; + align-items: center; + height: 100%; width: 100%; - margin-right: var(--space-1); - height: var(--base-button-height); - box-shadow: inset 0 0 0 1px var(--accent-a8); + min-width: 0; +} + +.tabWrap { + position: relative; + display: flex; + align-items: center; + flex: 1 1 180px; + min-width: 48px; + max-width: 240px; + height: 100%; +} + +.tabButton { + position: relative; + display: flex; + align-items: center; + gap: var(--space-2); + width: 100%; + height: calc(100% - 8px); + padding: 0 var(--space-2); + padding-right: 26px; + margin: 4px 1px; + background: var(--gray-a3); + border: none; + border-radius: var(--radius-2); + cursor: pointer; + transition: background-color 0.15s ease; + color: var(--gray-11); + font-size: 12px; + text-align: left; +} + +.tabButton:hover { + background: var(--gray-a4); +} + +.tabButton:focus-visible { + outline: 2px solid var(--accent-8); + outline-offset: -2px; +} + +.tabButtonActive { + background: var(--color-background); + color: var(--gray-12); +} + +.tabButtonActive:hover { + background: var(--color-background); +} + +.tabStatus { + display: flex; + align-items: center; + justify-content: center; + flex-shrink: 0; +} + +.tabTitle { + flex: 1; + min-width: 0; + overflow: hidden; 
+ text-overflow: ellipsis; + white-space: nowrap; +} + +.tabModeBadge { + flex-shrink: 0; + font-size: 9px; + text-transform: lowercase; + white-space: nowrap; +} + +.tabClose { + position: absolute; + right: 8px; + top: 50%; + transform: translateY(-50%); + display: flex; + align-items: center; + justify-content: center; + width: 16px; + height: 16px; + padding: 0; + border: none; + border-radius: var(--radius-1); + background: transparent; + color: var(--gray-10); + cursor: pointer; + opacity: 0; + transition: + opacity 0.15s ease, + background-color 0.15s ease; +} + +.tabWrap:hover .tabClose { + opacity: 1; +} + +.tabClose:hover { + background: var(--gray-a5); + color: var(--gray-12); +} + +.tabButtonActive ~ .tabClose { + opacity: 0.7; +} + +.tabButtonActive ~ .tabClose:hover { + opacity: 1; +} + +.RenameInput { + flex: 1; + min-width: 0; + height: calc(100% - 6px); + margin: 3px 2px; +} + +@media (hover: none) { + .tabClose { + opacity: 1; + } +} + +.homeButtonWrapper { + display: none; +} + +.scrollContainer { + display: none; } diff --git a/refact-agent/gui/src/components/Toolbar/Toolbar.tsx b/refact-agent/gui/src/components/Toolbar/Toolbar.tsx index 4b8307594..101bc5b15 100644 --- a/refact-agent/gui/src/components/Toolbar/Toolbar.tsx +++ b/refact-agent/gui/src/components/Toolbar/Toolbar.tsx @@ -1,59 +1,58 @@ -import { - Button, - DropdownMenu, - Flex, - IconButton, - Spinner, - TabNav, - Text, - TextField, -} from "@radix-ui/themes"; +import { TextField, HoverCard, Text, Badge } from "@radix-ui/themes"; import { Dropdown, DropdownNavigationOptions } from "./Dropdown"; -import { - DotFilledIcon, - DotsVerticalIcon, - HomeIcon, - PlusIcon, -} from "@radix-ui/react-icons"; +import { Cross1Icon, PlusIcon, CheckboxIcon } from "@radix-ui/react-icons"; +import classNames from "classnames"; +import { RefactIcon } from "../../images"; import { newChatAction } from "../../events"; -import { restart, useTourRefs } from "../../features/Tour"; +import { getStatusFromSessionState } from "../../utils/sessionStatus"; import { popBackTo, push } from "../../features/Pages/pagesSlice"; +import { + useCreateTaskMutation, + useUpdateTaskMetaMutation, +} from "../../services/refact/tasks"; +import { + selectOpenTasksFromRoot, + openTask, + closeTask, +} from "../../features/Tasks"; import { ChangeEvent, KeyboardEvent, + MouseEvent, useCallback, useEffect, - useMemo, useRef, useState, } from "react"; +import { updateChatTitleById } from "../../features/History/historySlice"; import { - deleteChatById, - getHistory, - updateChatTitleById, -} from "../../features/History/historySlice"; -import { restoreChat, saveTitle, selectThread } from "../../features/Chat"; -import { TruncateLeft } from "../Text"; + saveTitle, + selectAllThreads, + selectTabsDisplayData, + closeThread, + switchToThread, + selectChatId, + clearThreadPauseReasons, + setThreadConfirmationStatus, +} from "../../features/Chat"; +import { StatusDot } from "../StatusDot"; import { useAppDispatch, useAppSelector, useEventsBusForIDE, } from "../../hooks"; -import { useWindowDimensions } from "../../hooks/useWindowDimensions"; -import { clearPauseReasonsAndHandleToolsStatus } from "../../features/ToolConfirmation/confirmationSlice"; import { telemetryApi } from "../../services/refact/telemetry"; +import { useGetChatModesQuery } from "../../services/refact/chatModes"; import styles from "./Toolbar.module.css"; import { useActiveTeamsGroup } from "../../hooks/useActiveTeamsGroup"; +import { ConnectionStatusIndicator } from "../ConnectionStatus"; 
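
One behavioral detail worth noting before the Toolbar component body: the tab strip scrolls horizontally, and the `handleWheel` callback added below converts vertical wheel gestures into horizontal scrolling only when the tabs actually overflow. A self-contained sketch of that translation follows (plain DOM types; the helper name here is illustrative, the logic matches the diff):

```typescript
// Sketch of the wheel-to-horizontal-scroll behavior used by the tab strip.
// Mirrors the handleWheel callback added below; only the helper name is invented.
function scrollTabsHorizontally(container: HTMLDivElement, event: WheelEvent): void {
  // If the tab strip fits, do nothing and let the page handle the wheel event.
  if (container.scrollWidth <= container.clientWidth) return;
  // Otherwise consume the gesture and translate it into horizontal scrolling.
  event.preventDefault();
  container.scrollLeft += event.deltaY || event.deltaX;
}
```
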
+import { getModeColor } from "../../utils/modeColors"; export type DashboardTab = { type: "dashboard"; }; -function isDashboardTab(tab: Tab): tab is DashboardTab { - return tab.type === "dashboard"; -} - export type ChatTab = { type: "chat"; id: string; @@ -63,7 +62,17 @@ function isChatTab(tab: Tab): tab is ChatTab { return tab.type === "chat"; } -export type Tab = DashboardTab | ChatTab; +export type TaskTab = { + type: "task"; + taskId: string; + taskName: string; +}; + +function isTaskTab(tab: Tab): tab is TaskTab { + return tab.type === "task"; +} + +export type Tab = DashboardTab | ChatTab | TaskTab; export type ToolbarProps = { activeTab: Tab; @@ -71,32 +80,28 @@ export type ToolbarProps = { export const Toolbar = ({ activeTab }: ToolbarProps) => { const dispatch = useAppDispatch(); - const tabNav = useRef<HTMLElement | null>(null); - const [tabNavWidth, setTabNavWidth] = useState(0); - const { width: windowWidth } = useWindowDimensions(); - const [focus, setFocus] = useState<HTMLElement | null>(null); + const scrollContainerRef = useRef<HTMLDivElement | null>(null); + const activeTabRef = useRef<HTMLDivElement | null>(null); - const refs = useTourRefs(); const [sendTelemetryEvent] = telemetryApi.useLazySendTelemetryChatEventQuery(); - const history = useAppSelector(getHistory, { - devModeChecks: { stabilityCheck: "never" }, - }); - const isStreaming = useAppSelector((app) => app.chat.streaming); - const { isTitleGenerated, id: chatId } = useAppSelector(selectThread); - const cache = useAppSelector((app) => app.chat.cache); + const tabs = useAppSelector(selectTabsDisplayData); + const allThreads = useAppSelector(selectAllThreads); + const currentChatId = useAppSelector(selectChatId); + const openTasks = useAppSelector(selectOpenTasksFromRoot); const { newChatEnabled } = useActiveTeamsGroup(); + const { data: modesData } = useGetChatModesQuery(undefined); const { openSettings, openHotKeys } = useEventsBusForIDE(); + const [createTask] = useCreateTaskMutation(); - const [isOnlyOneChatTab, setIsOnlyOneChatTab] = useState(false); - const [isRenaming, setIsRenaming] = useState(false); - const [newTitle, setNewTitle] = useState<string | null>(null); - - const shouldChatTabLinkBeNotClickable = useMemo(() => { - return isOnlyOneChatTab && !isDashboardTab(activeTab); - }, [isOnlyOneChatTab, activeTab]); + const [renameState, setRenameState] = useState<{ + kind: "chat" | "task"; + id: string; + value: string; + } | null>(null); + const [updateTaskMeta] = useUpdateTaskMetaMutation(); const handleNavigation = useCallback( (to: DropdownNavigationOptions | "chat") => { @@ -128,15 +133,6 @@ export const Toolbar = ({ activeTab }: ToolbarProps) => { success: true, error_message: "", }); - } else if (to === "restart tour") { - dispatch(popBackTo({ name: "login page" })); - dispatch(push({ name: "welcome" })); - dispatch(restart()); - void sendTelemetryEvent({ - scope: `restartTour`, - success: true, - error_message: "", - }); } else if (to === "integrations") { dispatch(push({ name: "integrations page" })); void sendTelemetryEvent({ @@ -151,6 +147,27 @@ export const Toolbar = ({ activeTab }: ToolbarProps) => { success: true, error_message: "", }); + } else if (to === "knowledge graph") { + dispatch(push({ name: "knowledge graph" })); + void sendTelemetryEvent({ + scope: `openKnowledgeGraph`, + success: true, + error_message: "", + }); + } else if (to === "customization") { + dispatch(push({ name: "customization" })); + void sendTelemetryEvent({ + scope: `openCustomization`, + success: true, + 
error_message: "", + }); + } else if (to === "default models") { + dispatch(push({ name: "default models" })); + void sendTelemetryEvent({ + scope: `openDefaultModels`, + success: true, + error_message: "", + }); } else if (to === "chat") { dispatch(popBackTo({ name: "history" })); dispatch(push({ name: "chat" })); @@ -160,33 +177,83 @@ export const Toolbar = ({ activeTab }: ToolbarProps) => { ); const onCreateNewChat = useCallback(() => { - setIsRenaming((prev) => (prev ? !prev : prev)); - dispatch(newChatAction()); + setRenameState(null); + + const currentThread = allThreads[currentChatId] as + | { thread: { messages: unknown[] } } + | undefined; + + dispatch(clearThreadPauseReasons({ id: currentChatId })); dispatch( - clearPauseReasonsAndHandleToolsStatus({ + setThreadConfirmationStatus({ + id: currentChatId, wasInteracted: false, confirmationStatus: true, }), ); + + if (currentThread && currentThread.thread.messages.length === 0) { + dispatch(closeThread({ id: currentChatId })); + } + + dispatch(newChatAction()); handleNavigation("chat"); void sendTelemetryEvent({ scope: `openNewChat`, success: true, error_message: "", }); - }, [dispatch, sendTelemetryEvent, handleNavigation]); + }, [ + dispatch, + currentChatId, + allThreads, + sendTelemetryEvent, + handleNavigation, + ]); + + const onCreateNewTask = useCallback(() => { + createTask({ name: "New Task" }) + .unwrap() + .then((task) => { + dispatch(openTask({ id: task.id, name: task.name })); + dispatch(push({ name: "task workspace", taskId: task.id })); + void sendTelemetryEvent({ + scope: `openNewTask`, + success: true, + error_message: "", + }); + }) + .catch(() => { + /* handled by RTK Query */ + }); + }, [createTask, dispatch, sendTelemetryEvent]); const goToTab = useCallback( (tab: Tab) => { + const isSameTab = + (isChatTab(tab) && isChatTab(activeTab) && tab.id === activeTab.id) || + (isTaskTab(tab) && + isTaskTab(activeTab) && + tab.taskId === activeTab.taskId); + + if (isSameTab) { + return; + } + + if (isChatTab(activeTab)) { + const currentThread = allThreads[activeTab.id]; + if (currentThread && currentThread.thread.messages.length === 0) { + dispatch(closeThread({ id: activeTab.id })); + } + } + if (tab.type === "dashboard") { dispatch(popBackTo({ name: "history" })); - dispatch(newChatAction()); + } else if (tab.type === "task") { + dispatch(popBackTo({ name: "history" })); + dispatch(push({ name: "task workspace", taskId: tab.taskId })); } else { - if (shouldChatTabLinkBeNotClickable) return; - const chat = history.find((chat) => chat.id === tab.id); - if (chat != undefined) { - dispatch(restoreChat(chat)); - } + dispatch(switchToThread({ id: tab.id })); dispatch(popBackTo({ name: "history" })); dispatch(push({ name: "chat" })); } @@ -196,198 +263,325 @@ export const Toolbar = ({ activeTab }: ToolbarProps) => { error_message: "", }); }, - [dispatch, history, shouldChatTabLinkBeNotClickable, sendTelemetryEvent], + [dispatch, sendTelemetryEvent, activeTab, allThreads], ); - useEffect(() => { - if (!tabNav.current) { - return; - } - setTabNavWidth(tabNav.current.offsetWidth); - }, [tabNav, windowWidth]); + const handleCloseTaskTab = useCallback( + (event: MouseEvent, taskId: string) => { + event.stopPropagation(); + event.preventDefault(); + dispatch(closeTask(taskId)); + if (isTaskTab(activeTab) && activeTab.taskId === taskId) { + goToTab({ type: "dashboard" }); + } + }, + [dispatch, activeTab, goToTab], + ); useEffect(() => { - if (focus === null) return; - - // the function scrollIntoView doesn't always exist, and will 
crash on unit tests - // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition - if (focus.scrollIntoView) { - focus.scrollIntoView(); + if (activeTabRef.current?.scrollIntoView) { + activeTabRef.current.scrollIntoView({ + behavior: "smooth", + block: "nearest", + inline: "nearest", + }); } - }, [focus]); + }, [currentChatId, activeTab]); - const tabs = useMemo(() => { - return history.filter( - (chat) => - chat.read === false || - (activeTab.type === "chat" && activeTab.id == chat.id), - ); - }, [history, activeTab]); - - const shouldCollapse = useMemo(() => { - const dashboardWidth = windowWidth < 400 ? 47 : 70; // todo: compute this - const totalWidth = dashboardWidth + 140 * tabs.length; - return tabNavWidth < totalWidth; - }, [tabNavWidth, tabs.length, windowWidth]); + const handleChatThreadRenaming = useCallback( + (tabId: string, currentTitle: string) => { + setRenameState({ kind: "chat", id: tabId, value: currentTitle }); + }, + [], + ); - const handleChatThreadDeletion = useCallback(() => { - dispatch(deleteChatById(chatId)); - goToTab({ type: "dashboard" }); - }, [dispatch, chatId, goToTab]); + const handleKeyUpOnRename = useCallback( + (event: KeyboardEvent<HTMLInputElement>, tabId: string) => { + if (event.code === "Escape") { + setRenameState(null); + } + if (event.code === "Enter") { + const title = renameState?.value.trim(); + setRenameState(null); + if (!title) return; + dispatch( + saveTitle({ + id: tabId, + title, + isTitleGenerated: true, + }), + ); + dispatch(updateChatTitleById({ chatId: tabId, newTitle: title })); + } + }, + [dispatch, renameState], + ); - const handleChatThreadRenaming = useCallback(() => { - setIsRenaming(true); - }, []); + const handleTaskRenaming = useCallback( + (taskId: string, currentName: string) => { + setRenameState({ kind: "task", id: taskId, value: currentName }); + }, + [], + ); - const handleKeyUpOnRename = useCallback( - (event: KeyboardEvent<HTMLInputElement>) => { + const handleKeyUpOnTaskRename = useCallback( + (event: KeyboardEvent<HTMLInputElement>, taskId: string) => { if (event.code === "Escape") { - setIsRenaming(false); + setRenameState(null); } if (event.code === "Enter") { - setIsRenaming(false); - if (!newTitle || newTitle.trim() === "") return; - if (!isTitleGenerated) { - dispatch( - saveTitle({ - id: chatId, - title: newTitle, - isTitleGenerated: true, - }), - ); - } - dispatch(updateChatTitleById({ chatId: chatId, newTitle: newTitle })); + const name = renameState?.value.trim(); + setRenameState(null); + if (!name) return; + void updateTaskMeta({ taskId, name }); } }, - [dispatch, newTitle, chatId, isTitleGenerated], + [renameState, updateTaskMeta], ); - const handleChatTitleChange = (event: ChangeEvent<HTMLInputElement>) => { - setNewTitle(event.target.value); + const handleRenameChange = (event: ChangeEvent<HTMLInputElement>) => { + setRenameState((prev) => + prev ? 
{ ...prev, value: event.target.value } : null, + ); }; - useEffect(() => { - setIsOnlyOneChatTab(tabs.length < 2); - }, [tabs]); + const handleCloseTab = useCallback( + (event: MouseEvent, tabId: string) => { + event.stopPropagation(); + event.preventDefault(); + dispatch(closeThread({ id: tabId })); + if (activeTab.type === "chat" && activeTab.id === tabId) { + const remainingTabs = tabs.filter((t) => t.id !== tabId); + if (remainingTabs.length > 0) { + goToTab({ type: "chat", id: remainingTabs[0].id }); + } else { + goToTab({ type: "dashboard" }); + } + } + }, + [dispatch, activeTab, tabs, goToTab], + ); + + const handleWheel = useCallback((event: React.WheelEvent<HTMLDivElement>) => { + const container = scrollContainerRef.current; + if (!container) return; + if (container.scrollWidth <= container.clientWidth) return; + event.preventDefault(); + container.scrollLeft += event.deltaY || event.deltaX; + }, []); return ( - <Flex align="center" m="4px" gap="4px" style={{ alignSelf: "stretch" }}> - <Flex flexGrow="1" align="start" maxHeight="40px" overflowY="hidden"> - <TabNav.Root style={{ flex: 1, overflowX: "scroll" }} ref={tabNav}> - <TabNav.Link - active={isDashboardTab(activeTab)} - ref={(x) => refs.setBack(x)} - onClick={() => { - setIsRenaming((prev) => (prev ? !prev : prev)); - goToTab({ type: "dashboard" }); - }} - style={{ cursor: "pointer" }} - > - {windowWidth < 400 || shouldCollapse ? <HomeIcon /> : "Home"} - </TabNav.Link> - {tabs.map((chat) => { - const isStreamingThisTab = - chat.id in cache || - (isChatTab(activeTab) && chat.id === activeTab.id && isStreaming); - const isActive = isChatTab(activeTab) && activeTab.id == chat.id; + <div className={styles.toolbar}> + <div className={styles.toolbarSection}> + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + className={classNames(styles.iconButton, styles.homeButton)} + onClick={() => { + setRenameState(null); + goToTab({ type: "dashboard" }); + }} + aria-label="Home" + > + <RefactIcon style={{ color: "#E7150D" }} /> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="bottom"> + <Text as="p" size="2"> + Home + </Text> + </HoverCard.Content> + </HoverCard.Root> + </div> + + <div className={styles.toolbarDivider} /> + + <div + ref={scrollContainerRef} + className={styles.tabsContainer} + onWheel={handleWheel} + > + <div role="tablist" className={styles.tabList}> + {openTasks.map((task) => { + const isActive = + isTaskTab(activeTab) && activeTab.taskId === task.id; + const taskName = task.name.trim() || "Task"; + const isRenaming = + renameState?.kind === "task" && renameState.id === task.id; + if (isRenaming) { return ( - <TextField.Root - my="auto" - key={chat.id} - autoComplete="off" - onKeyUp={handleKeyUpOnRename} - onBlur={() => setIsRenaming(false)} - autoFocus - size="2" - defaultValue={isTitleGenerated ? chat.title : ""} - onChange={handleChatTitleChange} - className={styles.RenameInput} - /> + <div key={`task-${task.id}`} className={styles.tabWrap}> + <TextField.Root + autoComplete="off" + onKeyUp={(e) => handleKeyUpOnTaskRename(e, task.id)} + onBlur={() => setRenameState(null)} + autoFocus + size="1" + value={renameState.value} + onChange={handleRenameChange} + className={styles.RenameInput} + /> + </div> ); } + return ( - <TabNav.Link - active={isActive} - key={chat.id} - onClick={() => { - if (shouldChatTabLinkBeNotClickable) return; - goToTab({ type: "chat", id: chat.id }); - }} - style={{ minWidth: 0, maxWidth: "150px", cursor: "pointer" }} - ref={isActive ? 
setFocus : undefined} - title={chat.title} + <div + key={`task-${task.id}`} + className={styles.tabWrap} + ref={isActive ? activeTabRef : undefined} > - {isStreamingThisTab && <Spinner />} - {!isStreamingThisTab && chat.read === false && ( - <DotFilledIcon /> - )} - <Flex gap="2" align="center"> - <TruncateLeft - style={{ - maxWidth: shouldCollapse ? "25px" : "110px", - }} - > - {chat.title} - </TruncateLeft> - {isActive && !isStreamingThisTab && isOnlyOneChatTab && ( - <DropdownMenu.Root> - <DropdownMenu.Trigger> - <IconButton - size="1" - variant="ghost" - color="gray" - title="Title actions" - > - <DotsVerticalIcon /> - </IconButton> - </DropdownMenu.Trigger> - <DropdownMenu.Content - size="1" - side="bottom" - align="end" - style={{ - minWidth: 110, - }} - > - <DropdownMenu.Item onClick={handleChatThreadRenaming}> - Rename - </DropdownMenu.Item> - <DropdownMenu.Item - onClick={handleChatThreadDeletion} - color="red" - > - Delete chat - </DropdownMenu.Item> - </DropdownMenu.Content> - </DropdownMenu.Root> + <button + type="button" + role="tab" + aria-selected={isActive} + className={`${styles.tabButton} ${ + isActive ? styles.tabButtonActive : "" + }`} + onClick={() => + goToTab({ type: "task", taskId: task.id, taskName }) + } + onDoubleClick={() => handleTaskRenaming(task.id, taskName)} + title={taskName} + > + <span className={styles.tabStatus}> + <StatusDot state="idle" size="small" /> + </span> + <span className={styles.tabTitle}>{taskName}</span> + </button> + <button + type="button" + className={styles.tabClose} + title="Close task tab" + onClick={(e) => handleCloseTaskTab(e, task.id)} + > + <Cross1Icon width={10} height={10} /> + </button> + </div> + ); + })} + + {tabs.map((tab) => { + const isActive = isChatTab(activeTab) && activeTab.id === tab.id; + const isRenaming = + renameState?.kind === "chat" && renameState.id === tab.id; + + if (isRenaming) { + return ( + <div key={tab.id} className={styles.tabWrap}> + <TextField.Root + autoComplete="off" + onKeyUp={(e) => handleKeyUpOnRename(e, tab.id)} + onBlur={() => setRenameState(null)} + autoFocus + size="1" + value={renameState.value} + onChange={handleRenameChange} + className={styles.RenameInput} + /> + </div> + ); + } + + const statusState = getStatusFromSessionState(tab.session_state); + + const modeInfo = modesData?.modes.find((m) => m.id === tab.mode); + const modeLabel = modeInfo?.title ?? tab.mode; + + return ( + <div + key={tab.id} + className={styles.tabWrap} + ref={isActive ? activeTabRef : undefined} + > + <button + type="button" + role="tab" + aria-selected={isActive} + className={`${styles.tabButton} ${ + isActive ? styles.tabButtonActive : "" + }`} + onClick={() => goToTab({ type: "chat", id: tab.id })} + onDoubleClick={() => + handleChatThreadRenaming(tab.id, tab.title) + } + title={tab.title} + > + <span className={styles.tabStatus}> + <StatusDot state={statusState} size="small" /> + </span> + <span className={styles.tabTitle}>{tab.title}</span> + {modeLabel && ( + <Badge + size="1" + color={getModeColor(tab.mode)} + variant="soft" + className={styles.tabModeBadge} + > + {modeLabel} + </Badge> )} - </Flex> - </TabNav.Link> + </button> + <button + type="button" + className={styles.tabClose} + title="Close tab" + onClick={(e) => handleCloseTab(e, tab.id)} + > + <Cross1Icon width={10} height={10} /> + </button> + </div> ); })} - </TabNav.Root> - </Flex> - {windowWidth < 400 ? 
( - <IconButton - variant="outline" - ref={(x) => refs.setNewChat(x)} - onClick={onCreateNewChat} - > - <PlusIcon /> - </IconButton> - ) : ( - <Button - variant="outline" - ref={(x) => refs.setNewChat(x)} - onClick={onCreateNewChat} - disabled={!newChatEnabled} - > - <PlusIcon /> - <Text>New chat</Text> - </Button> - )} - <Dropdown handleNavigation={handleNavigation} /> - </Flex> + </div> + </div> + + <div className={styles.toolbarDivider} /> + + <div className={styles.toolbarSection}> + <ConnectionStatusIndicator /> + + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + className={styles.iconButton} + onClick={onCreateNewTask} + aria-label="New Task" + > + <CheckboxIcon /> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="bottom"> + <Text as="p" size="2"> + New Task + </Text> + </HoverCard.Content> + </HoverCard.Root> + + <HoverCard.Root> + <HoverCard.Trigger> + <button + type="button" + className={styles.iconButton} + onClick={onCreateNewChat} + disabled={!newChatEnabled} + aria-label="New Chat" + > + <PlusIcon /> + </button> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="bottom"> + <Text as="p" size="2"> + New Chat + </Text> + </HoverCard.Content> + </HoverCard.Root> + + <Dropdown handleNavigation={handleNavigation} useGhostTrigger /> + </div> + </div> ); }; diff --git a/refact-agent/gui/src/components/Tools/Textdoc.tsx b/refact-agent/gui/src/components/Tools/Textdoc.tsx index 80a2a1d82..7c454d84a 100644 --- a/refact-agent/gui/src/components/Tools/Textdoc.tsx +++ b/refact-agent/gui/src/components/Tools/Textdoc.tsx @@ -13,11 +13,17 @@ import { UpdateRegexTextDocToolCall, UpdateTextDocToolCall, UpdateTextDocByLinesToolCall, + UpdateTextDocAnchoredToolCall, + ApplyPatchToolCall, + UndoTextDocToolCall, isCreateTextDocToolCall, isReplaceTextDocToolCall, isUpdateRegexTextDocToolCall, isUpdateTextDocToolCall, isUpdateTextDocByLinesToolCall, + isUpdateTextDocAnchoredToolCall, + isApplyPatchToolCall, + isUndoTextDocToolCall, parseRawTextDocToolCall, } from "./types"; import { Box, Card, Flex, Button } from "@radix-ui/themes"; @@ -32,7 +38,7 @@ import { selectCanPaste, selectChatId } from "../../features/Chat"; import { toolsApi } from "../../services/refact"; import { ErrorCallout } from "../Callout"; import { isRTKResponseErrorWithDetailMessage } from "../../utils"; -import { MarkdownCodeBlock } from "../Markdown/CodeBlock"; +import { ShikiCodeBlock } from "../Markdown/ShikiCodeBlock"; import classNames from "classnames"; export const TextDocTool: React.FC<{ @@ -64,9 +70,31 @@ export const TextDocTool: React.FC<{ return <UpdateTextDocByLines toolCall={maybeTextDocToolCall} />; } + if (isUpdateTextDocAnchoredToolCall(maybeTextDocToolCall)) { + return <UpdateTextDocAnchored toolCall={maybeTextDocToolCall} />; + } + + if (isApplyPatchToolCall(maybeTextDocToolCall)) { + return <ApplyPatch toolCall={maybeTextDocToolCall} />; + } + + if (isUndoTextDocToolCall(maybeTextDocToolCall)) { + return <UndoTextDoc toolCall={maybeTextDocToolCall} />; + } + return false; }; +function getToolCallPath(toolCall: TextDocToolCall): string | null { + if (isApplyPatchToolCall(toolCall)) { + const match = toolCall.function.arguments.patch.match( + /^[-+]{3}\s+[ab]\/(.+)$/m, + ); + return match ? match[1] : null; + } + return (toolCall.function.arguments as { path?: string }).path ?? 
null; +} + type TextDocHeaderProps = { toolCall: TextDocToolCall }; const TextDocHeader = forwardRef<HTMLDivElement, TextDocHeaderProps>( ({ toolCall }, ref) => { @@ -78,15 +106,16 @@ const TextDocHeader = forwardRef<HTMLDivElement, TextDocHeaderProps>( const canPaste = useAppSelector(selectCanPaste); const chatId = useAppSelector(selectChatId); + const toolCallPath = useMemo(() => getToolCallPath(toolCall), [toolCall]); + const clearErrorMessage = useCallback(() => setErrorMessage(""), []); - // move this const handleOpenFile = useCallback(async () => { - if (!toolCall.function.arguments.path) return; + if (!toolCallPath) return; await queryPathThenOpenFile({ - file_path: toolCall.function.arguments.path, + file_path: toolCallPath, }); - }, [toolCall.function.arguments.path, queryPathThenOpenFile]); + }, [toolCallPath, queryPathThenOpenFile]); const handleReplace = useCallback( (content: string) => { @@ -148,7 +177,7 @@ const TextDocHeader = forwardRef<HTMLDivElement, TextDocHeaderProps>( void handleOpenFile(); }} > - {toolCall.function.arguments.path} + {toolCallPath ?? "apply_patch"} </Link> </TruncateLeft>{" "} <div style={{ flexGrow: 1 }} /> @@ -207,9 +236,9 @@ const CreateTextDoc: React.FC<{ <TextDocHeader toolCall={toolCall} ref={ref} /> <Reveal isRevealingCode defaultOpen={lineCount < 9} onClose={handleClose}> - <MarkdownCodeBlock onCopyClick={handleCopy} className={className}> + <ShikiCodeBlock onCopyClick={handleCopy} className={className}> {toolCall.function.arguments.content} - </MarkdownCodeBlock> + </ShikiCodeBlock> </Reveal> </Box> ); @@ -241,9 +270,9 @@ const ReplaceTextDoc: React.FC<{ <Box className={styles.textdoc}> <TextDocHeader toolCall={toolCall} ref={ref} /> <Reveal isRevealingCode defaultOpen={lineCount < 9} onClose={handleClose}> - <MarkdownCodeBlock onCopyClick={handleCopy} className={className}> + <ShikiCodeBlock onCopyClick={handleCopy} className={className}> {toolCall.function.arguments.replacement} - </MarkdownCodeBlock> + </ShikiCodeBlock> </Reveal> </Box> ); @@ -276,7 +305,7 @@ const UpdateRegexTextDoc: React.FC<{ <Box className={styles.textdoc}> <TextDocHeader toolCall={toolCall} ref={ref} /> <Reveal isRevealingCode defaultOpen={lineCount < 9} onClose={handleClose}> - <MarkdownCodeBlock className="language-py">{code}</MarkdownCodeBlock> + <ShikiCodeBlock className="language-py">{code}</ShikiCodeBlock> </Reveal> </Box> ); @@ -306,9 +335,9 @@ const UpdateTextDoc: React.FC<{ <Box className={styles.textdoc}> <TextDocHeader toolCall={toolCall} ref={ref} /> <Reveal isRevealingCode defaultOpen={lineCount < 9} onClose={handleClose}> - <MarkdownCodeBlock onCopyClick={handleCopy} className={className}> + <ShikiCodeBlock onCopyClick={handleCopy} className={className}> {toolCall.function.arguments.replacement} - </MarkdownCodeBlock> + </ShikiCodeBlock> </Reveal> </Box> ); @@ -338,14 +367,102 @@ const UpdateTextDocByLines: React.FC<{ <Box className={styles.textdoc}> <TextDocHeader toolCall={toolCall} ref={ref} /> <Reveal isRevealingCode defaultOpen={lineCount < 9} onClose={handleClose}> - <MarkdownCodeBlock onCopyClick={handleCopy} className={className}> + <ShikiCodeBlock onCopyClick={handleCopy} className={className}> {toolCall.function.arguments.content} - </MarkdownCodeBlock> + </ShikiCodeBlock> </Reveal> </Box> ); }; +const UpdateTextDocAnchored: React.FC<{ + toolCall: UpdateTextDocAnchoredToolCall; +}> = ({ toolCall }) => { + const copyToClipBoard = useCopyToClipboard(); + const ref = useRef<HTMLDivElement>(null); + const handleClose = useHideScroll(ref); + const 
handleCopy = useCallback(() => { + copyToClipBoard(toolCall.function.arguments.content); + }, [copyToClipBoard, toolCall.function.arguments.content]); + + const className = useMemo(() => { + const extension = getFileExtension(toolCall.function.arguments.path); + return `language-${extension}`; + }, [toolCall.function.arguments.path]); + + const lineCount = useMemo( + () => toolCall.function.arguments.content.split("\n").length, + [toolCall.function.arguments.content], + ); + + const modeLabels = { + replace_between: "Replace between anchors", + insert_after: "Insert after anchor", + insert_before: "Insert before anchor", + } as const; + + const modeLabel = modeLabels[toolCall.function.arguments.mode]; + + return ( + <Box className={styles.textdoc}> + <TextDocHeader toolCall={toolCall} ref={ref} /> + <Box px="2" py="1"> + <span style={{ fontSize: "11px", opacity: 0.7 }}>{modeLabel}</span> + </Box> + <Reveal isRevealingCode defaultOpen={lineCount < 9} onClose={handleClose}> + <ShikiCodeBlock onCopyClick={handleCopy} className={className}> + {toolCall.function.arguments.content} + </ShikiCodeBlock> + </Reveal> + </Box> + ); +}; + +const ApplyPatch: React.FC<{ + toolCall: ApplyPatchToolCall; +}> = ({ toolCall }) => { + const ref = useRef<HTMLDivElement>(null); + const handleClose = useHideScroll(ref); + + const lineCount = useMemo( + () => toolCall.function.arguments.patch.split("\n").length, + [toolCall.function.arguments.patch], + ); + + return ( + <Box className={styles.textdoc}> + <TextDocHeader toolCall={toolCall} ref={ref} /> + <Reveal + isRevealingCode + defaultOpen={lineCount < 15} + onClose={handleClose} + > + <ShikiCodeBlock className="language-diff"> + {toolCall.function.arguments.patch} + </ShikiCodeBlock> + </Reveal> + </Box> + ); +}; + +const UndoTextDoc: React.FC<{ + toolCall: UndoTextDocToolCall; +}> = ({ toolCall }) => { + const ref = useRef<HTMLDivElement>(null); + const steps = toolCall.function.arguments.steps ?? 1; + + return ( + <Box className={styles.textdoc}> + <TextDocHeader toolCall={toolCall} ref={ref} /> + <Box px="2" py="1"> + <span style={{ fontSize: "12px" }}> + ↩️ Undo {steps} step{steps > 1 ? 
"s" : ""} + </span> + </Box> + </Box> + ); +}; + function getFileExtension(filePath: string): string { const fileName = filename(filePath); if (fileName.toLocaleLowerCase().startsWith("dockerfile")) diff --git a/refact-agent/gui/src/components/Tools/types.ts b/refact-agent/gui/src/components/Tools/types.ts index 547b07a7a..8d503a35d 100644 --- a/refact-agent/gui/src/components/Tools/types.ts +++ b/refact-agent/gui/src/components/Tools/types.ts @@ -7,6 +7,9 @@ export const TEXTDOC_TOOL_NAMES = [ "replace_textdoc", "update_textdoc_regex", "update_textdoc_by_lines", + "update_textdoc_anchored", + "apply_patch", + "undo_textdoc", ]; type TextDocToolNames = (typeof TEXTDOC_TOOL_NAMES)[number]; @@ -30,7 +33,7 @@ export const isRawTextDocToolCall = ( export type ParsedRawTextDocToolCall = Omit<RawTextDocTool, "function"> & { function: { name: TextDocToolNames; - arguments: Record<string, string | boolean>; + arguments: Record<string, string | boolean | number | undefined>; }; }; @@ -176,12 +179,81 @@ export const isUpdateTextDocByLinesToolCall = ( return true; }; +export interface UpdateTextDocAnchoredToolCall + extends ParsedRawTextDocToolCall { + function: { + name: "update_textdoc_anchored"; + arguments: { + path: string; + anchor1: string; + anchor2?: string; + content: string; + mode: "replace_between" | "insert_after" | "insert_before"; + multiple?: boolean; + }; + }; +} + +export const isUpdateTextDocAnchoredToolCall = ( + toolCall: ParsedRawTextDocToolCall, +): toolCall is UpdateTextDocAnchoredToolCall => { + if (toolCall.function.name !== "update_textdoc_anchored") return false; + if (!("path" in toolCall.function.arguments)) return false; + if (typeof toolCall.function.arguments.path !== "string") return false; + if (!("anchor1" in toolCall.function.arguments)) return false; + if (typeof toolCall.function.arguments.anchor1 !== "string") return false; + if (!("content" in toolCall.function.arguments)) return false; + if (typeof toolCall.function.arguments.content !== "string") return false; + if (!("mode" in toolCall.function.arguments)) return false; + return true; +}; + +export interface ApplyPatchToolCall extends ParsedRawTextDocToolCall { + function: { + name: "apply_patch"; + arguments: { + patch: string; + }; + }; +} + +export const isApplyPatchToolCall = ( + toolCall: ParsedRawTextDocToolCall, +): toolCall is ApplyPatchToolCall => { + if (toolCall.function.name !== "apply_patch") return false; + if (!("patch" in toolCall.function.arguments)) return false; + if (typeof toolCall.function.arguments.patch !== "string") return false; + return true; +}; + +export interface UndoTextDocToolCall extends ParsedRawTextDocToolCall { + function: { + name: "undo_textdoc"; + arguments: { + path: string; + steps?: number; + }; + }; +} + +export const isUndoTextDocToolCall = ( + toolCall: ParsedRawTextDocToolCall, +): toolCall is UndoTextDocToolCall => { + if (toolCall.function.name !== "undo_textdoc") return false; + if (!("path" in toolCall.function.arguments)) return false; + if (typeof toolCall.function.arguments.path !== "string") return false; + return true; +}; + export type TextDocToolCall = | CreateTextDocToolCall | UpdateTextDocToolCall | ReplaceTextDocToolCall | UpdateRegexTextDocToolCall - | UpdateTextDocByLinesToolCall; + | UpdateTextDocByLinesToolCall + | UpdateTextDocAnchoredToolCall + | ApplyPatchToolCall + | UndoTextDocToolCall; function isTextDocToolCall( toolCall: ParsedRawTextDocToolCall, @@ -191,6 +263,9 @@ function isTextDocToolCall( if (isReplaceTextDocToolCall(toolCall)) 
return true; if (isUpdateRegexTextDocToolCall(toolCall)) return true; if (isUpdateTextDocByLinesToolCall(toolCall)) return true; + if (isUpdateTextDocAnchoredToolCall(toolCall)) return true; + if (isApplyPatchToolCall(toolCall)) return true; + if (isUndoTextDocToolCall(toolCall)) return true; return false; } diff --git a/refact-agent/gui/src/components/Tour/Tour.tsx b/refact-agent/gui/src/components/Tour/Tour.tsx deleted file mode 100644 index 4038b144c..000000000 --- a/refact-agent/gui/src/components/Tour/Tour.tsx +++ /dev/null @@ -1,140 +0,0 @@ -import React, { useCallback, useEffect } from "react"; -import { TourBubble } from "./TourBubble"; -import { next, useTourRefs } from "../../features/Tour"; -import { useAppSelector, useAppDispatch } from "../../hooks"; -import { RootState } from "../../app/store"; -import { push } from "../../features/Pages/pagesSlice"; -import completionGif from "../../../public/completion.gif"; -import commandsGif from "../../../public/commands.gif"; -import agentGif from "../../../public/agent.gif"; -import { newChatAction } from "../../events"; - -export type TourProps = { - page: string; -}; - -export const Tour: React.FC<TourProps> = ({ page }) => { - const dispatch = useAppDispatch(); - const state = useAppSelector((state: RootState) => state.tour); - const refs = useTourRefs(); - - const openChat = useCallback(() => { - dispatch(newChatAction()); - dispatch(push({ name: "chat" })); - }, [dispatch]); - - const openHistory = useCallback(() => { - dispatch(push({ name: "history" })); - }, [dispatch]); - - const step = state.type === "in_progress" ? state.step : 0; - - useEffect(() => { - if (state.type === "in_progress" && step === 1 && page === "chat") { - dispatch(next()); - } - - if (state.type === "in_progress" && step === 4 && page === "history") { - dispatch(next()); - } - - if (state.type === "in_progress" && step === 6 && page === "history") { - dispatch(push({ name: "tour end" })); - } - - if (state.type === "finished" && page === "tour end") { - dispatch(push({ name: "history" })); - } - }, [state.type, step, page, dispatch]); - - const chatWidth = "calc(100% - 20px)"; - - // TODO: Did the Popover or HoverCard components not work for this? - return ( - <> - <TourBubble - title="Agent can accomplish tasks end to end" - text={`Write anything you want to do and Refact.ai Agent will\n- inspect your files\n- write the code\n- run shell commands if needed\n- apply the code in your files\n- open browser to check if changes are correct in case of UI`} - step={1} - target={refs.newChat} - down={true} - isPointing={false} - onPage={"history"} - onNext={openChat} - page={page} - deltaY={-40} - > - <img - style={{ marginTop: "10px", marginBottom: "30px" }} - src={agentGif} - /> - </TourBubble> - <TourBubble - title="Integrations" - text={ - "In order for agent to work properly you need to set up integrations. Just click on this button and follow the instructions." 
- } - step={2} - down={false} - target={refs.setupIntegrations} - containerWidth={chatWidth} - onPage={"chat"} - page={page} - bubbleContainerStyles={{ - alignSelf: "flex-end", - }} - /> - <TourBubble - title="Chat modes / models" - text={`Our chat allows you to\n- use images to give more context\n- specify context use @commands, write @help to view`} - step={3} - target={refs.chat} - onPage={"chat"} - page={page} - down={false} - > - <img - style={{ - marginTop: "10px", - marginBottom: "30px", - }} - src={commandsGif} - /> - </TourBubble> - <TourBubble - title="Difference in Quick / Explore / Agent" - text={`Switch inside of the chat let you to choose the chat mode:\n- Quick for immediate answers, no tools and context access\n- Explore for ideating and learning, chat can access the context but all changes are performed manually\n- Agent for tasks where you expect chat to make changes autonomously`} - step={4} - down={false} - target={refs.useTools} - containerWidth={chatWidth} - onPage={"chat"} - onNext={openHistory} - page={page} - bubbleContainerStyles={{ - maxWidth: 550, - alignSelf: "start", - }} - /> - <TourBubble - title="Code completion" - text={`- we use context from your entire repository\n- you can adjust the number of output tokens in Plugin settings`} - step={5} - target={refs.newChat} - down={true} - isPointing={false} - onPage={"history"} - page={page} - deltaY={-40} - > - <img - style={{ - marginTop: "10px", - marginBottom: "30px", - }} - src={completionGif} - /> - </TourBubble> - </> - ); -}; diff --git a/refact-agent/gui/src/components/Tour/TourBox.tsx b/refact-agent/gui/src/components/Tour/TourBox.tsx deleted file mode 100644 index 627b40e2c..000000000 --- a/refact-agent/gui/src/components/Tour/TourBox.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { Flex } from "@radix-ui/themes"; -import { CSSProperties, ReactNode } from "react"; -import { useAppearance } from "../../hooks"; - -export type TourBubbleProps = { - children?: ReactNode; - style?: CSSProperties; -}; - -export function TourBox({ children, style }: TourBubbleProps) { - const { appearance } = useAppearance(); - const backgroundColorForTourBox = appearance === "light" ? 
"black" : "white"; - - return ( - <Flex - direction="column" - style={{ - position: "relative", - backgroundColor: backgroundColorForTourBox, - borderRadius: "5px", - minHeight: "60px", - //TODO: justify prop - justifyContent: "center", - // TODO: padding prop - padding: "10px", - alignSelf: "stretch", - maxWidth: 550, - ...style, - }} - > - {children} - </Flex> - ); -} diff --git a/refact-agent/gui/src/components/Tour/TourBubble.tsx b/refact-agent/gui/src/components/Tour/TourBubble.tsx deleted file mode 100644 index ead27ca76..000000000 --- a/refact-agent/gui/src/components/Tour/TourBubble.tsx +++ /dev/null @@ -1,202 +0,0 @@ -import { Flex, Link, Text } from "@radix-ui/themes"; -import { useAppSelector, useAppDispatch, useAppearance } from "../../hooks"; -import { RootState } from "../../app/store"; -import { close, next } from "../../features/Tour"; -import { useWindowDimensions } from "../../hooks/useWindowDimensions"; -import { TourBox } from "./TourBox"; -import { TourTitle } from "./TourTitle"; -import { CSSProperties, ReactNode, useEffect, useState } from "react"; - -export type TourBubbleProps = { - title?: string; - text: string; - step: number; - down: boolean; - isPointing?: boolean; - target: HTMLElement | null; - containerWidth?: string; - onPage: string; - page: string; - deltaY?: number; - children?: ReactNode; - onNext?: () => void; - bubbleContainerStyles?: CSSProperties; -}; - -export function TourBubble({ - title, - text, - step, - target, - down, - containerWidth, - onPage, - page, - isPointing, - deltaY, - children, - onNext, - bubbleContainerStyles, -}: TourBubbleProps) { - const dispatch = useAppDispatch(); - const state = useAppSelector((state: RootState) => state.tour); - const { width: windowWidth, height: windowHeight } = useWindowDimensions(); - const [pos, setPos] = useState<DOMRect | undefined>(undefined); - const { appearance } = useAppearance(); - - const isBubbleOpen = state.type === "in_progress" && state.step === step; - - if (isPointing === undefined) { - isPointing = true; - } - - // TODO: find a better way of doing this - // This code is there to force a rerender if target is null - useEffect(() => { - const update = () => { - if (target === null || page !== onPage) { - if (pos !== undefined) { - setPos(undefined); - } - } else { - const newPos = target.getBoundingClientRect(); - if ( - pos?.left !== newPos.left || - pos.right !== newPos.right || - pos.top !== newPos.top || - pos.bottom !== newPos.bottom - ) { - setPos(newPos); - } - } - }; - update(); - - if (target !== null && page === onPage && isBubbleOpen) { - const interval = setInterval(update, 100); - return () => { - clearInterval(interval); - }; - } - }, [ - page, - onPage, - target, - pos, - setPos, - windowWidth, - windowHeight, - isBubbleOpen, - ]); - - if (pos === undefined) { - return <></>; - } - - const centX = (pos.left + pos.right) / 2 - windowWidth / 2; - const arrowColor = appearance == "light" ? "black" : "white"; - - return ( - isBubbleOpen && ( - <Flex - style={{ - flexDirection: "column", - position: "fixed", - height: 0, - width: "100%", - alignSelf: "center", - top: (deltaY ?? 0) + (down ? pos.bottom : pos.top), - zIndex: 100, - }} - > - <Flex - style={{ - position: "absolute", - width: containerWidth ?? "min(calc(100% - 20px), 540px)", - flexDirection: "column", - alignSelf: "center", - bottom: down ? "auto" : 0, - top: down ? 
0 : "auto", - }} - > - {down && ( - <Flex - style={{ - width: 0, - height: 0, - borderLeft: "15px solid transparent", - borderRight: "15px solid transparent", - borderBottom: `15px solid ${arrowColor}`, - alignSelf: "center", - position: "relative", - opacity: isPointing ? 1 : 0, - left: centX, - }} - /> - )} - <TourBox style={bubbleContainerStyles}> - {title && <TourTitle title={title} />} - <Text - style={{ - color: appearance === "light" ? "white" : "black", - whiteSpace: "pre-line", - }} - mt="2" - mb={children ? "1" : "5"} - > - {text} - </Text> - {children} - <Link - style={{ - cursor: "pointer", - position: "absolute", - right: "8px", - top: "1px", - color: appearance == "light" ? "white" : "black", - }} - onClick={() => { - dispatch(close()); - }} - > - × - </Link> - <Link - style={{ - cursor: "pointer", - position: "absolute", - right: "10px", - bottom: "10px", - textTransform: "uppercase", - fontSize: "12px", - fontWeight: "bold", - color: appearance == "light" ? "#54a1ff" : "#004069", - }} - onClick={() => { - dispatch(next()); - if (onNext) onNext(); - }} - > - next - </Link> - </TourBox> - {down || ( - <Flex - style={{ - width: 0, - height: 0, - borderLeft: "15px solid transparent", - borderRight: "15px solid transparent", - borderTop: `15px solid ${arrowColor}`, - alignSelf: "center", - position: "relative", - opacity: isPointing ? 1 : 0, - left: centX, - }} - /> - )} - </Flex> - </Flex> - ) - ); -} diff --git a/refact-agent/gui/src/components/Tour/TourButton.tsx b/refact-agent/gui/src/components/Tour/TourButton.tsx deleted file mode 100644 index c44dc2030..000000000 --- a/refact-agent/gui/src/components/Tour/TourButton.tsx +++ /dev/null @@ -1,25 +0,0 @@ -import { Button, Flex } from "@radix-ui/themes"; - -export type TourButtonProps = { - title: string; - onClick: () => void; -}; - -export function TourButton({ title, onClick }: TourButtonProps) { - return ( - <Flex - direction="row" - style={{ - // TODO: align prop - alignItems: "center", - }} - > - <Button - onClick={onClick} - style={{ backgroundColor: "#E7150D", flex: 1, padding: 10 }} - > - {title} - </Button> - </Flex> - ); -} diff --git a/refact-agent/gui/src/components/Tour/TourEnd.tsx b/refact-agent/gui/src/components/Tour/TourEnd.tsx deleted file mode 100644 index a4cd2cd08..000000000 --- a/refact-agent/gui/src/components/Tour/TourEnd.tsx +++ /dev/null @@ -1,53 +0,0 @@ -import { Flex, Text } from "@radix-ui/themes"; -import { TourBox } from "./TourBox"; -import { TourTitle } from "./TourTitle"; -import { TourButton } from "./TourButton"; -import { useAppDispatch, useAppearance, useOpenUrl } from "../../hooks"; -import { finish } from "../../features/Tour"; -import { Link } from "../Link"; - -export const TourEnd = () => { - const { appearance } = useAppearance(); - const openUrl = useOpenUrl(); - const dispatch = useAppDispatch(); - const onPressNext = () => { - dispatch(finish()); - }; - - return ( - <Flex - direction="column" - gap="2" - maxWidth="540px" - m="8px" - style={{ alignSelf: "center" }} - > - <TourBox - style={{ - gap: "15px", - alignSelf: "center", - color: appearance === "light" ? "white" : "black", - whiteSpace: "pre-line", - }} - > - <TourTitle title="Your Refact product tour is finished!" /> - <Flex direction="column"> - <Text mb="1">There are more things to explore in Refact!</Text> - <Text> - -{" "} - <Link - style={{ - color: appearance === "light" ? 
"white" : "black", - textDecoration: "underline", - }} - onClick={() => openUrl("https://docs.refact.ai")} - > - Check out our documentation - </Link> - </Text> - </Flex> - <TourButton title="Ready to use" onClick={onPressNext} /> - </TourBox> - </Flex> - ); -}; diff --git a/refact-agent/gui/src/components/Tour/TourTitle.tsx b/refact-agent/gui/src/components/Tour/TourTitle.tsx deleted file mode 100644 index 6959ccb61..000000000 --- a/refact-agent/gui/src/components/Tour/TourTitle.tsx +++ /dev/null @@ -1,40 +0,0 @@ -import { Flex, Text } from "@radix-ui/themes"; -import imgUrl from "../../../public/favicon.png"; -import { useAppearance } from "../../hooks"; - -export type TourTitle = { - title: string; -}; - -export function TourTitle({ title }: TourTitle) { - const { appearance } = useAppearance(); - - return ( - <Flex direction="row" style={{ alignItems: "flex-start" }}> - <img - src={imgUrl} - width={28} - height={28} - style={{ marginTop: 5, marginBottom: 5 }} - /> - <Text - size="3" - m="4" - mt="0" - mb="0" - ml="2" - style={{ - color: appearance == "light" ? "white" : "black", - // fontSize: 14, - // margin: 4, - // marginTop: 0, - // marginLeft: 8, - paddingRight: 30, - alignSelf: "center", - }} - > - {title} - </Text> - </Flex> - ); -} diff --git a/refact-agent/gui/src/components/Tour/Welcome.tsx b/refact-agent/gui/src/components/Tour/Welcome.tsx deleted file mode 100644 index 6ed6f23fd..000000000 --- a/refact-agent/gui/src/components/Tour/Welcome.tsx +++ /dev/null @@ -1,37 +0,0 @@ -import { Flex, Text } from "@radix-ui/themes"; -import { TourBox } from "./TourBox"; -import { TourTitle } from "./TourTitle"; -import { TourButton } from "./TourButton"; -import { useAppearance } from "../../hooks"; - -export type WelcomeProps = { - onPressNext: () => void; -}; - -export const Welcome: React.FC<WelcomeProps> = ({ - onPressNext, -}: WelcomeProps) => { - const { appearance } = useAppearance(); - - return ( - <Flex - direction="column" - gap="2" - maxWidth="540px" - m="8px" - style={{ alignSelf: "center" }} - > - <TourBox style={{ gap: "15px" }}> - <TourTitle title="Welcome to Refact.ai!" /> - <Text - style={{ - color: appearance == "light" ? 
"white" : "black", - }} - > - {"This is a product tour: helpful tips for you to start."} - </Text> - <TourButton title="Get Started" onClick={onPressNext} /> - </TourBox> - </Flex> - ); -}; diff --git a/refact-agent/gui/src/components/Tour/index.tsx b/refact-agent/gui/src/components/Tour/index.tsx deleted file mode 100644 index a89a8ca93..000000000 --- a/refact-agent/gui/src/components/Tour/index.tsx +++ /dev/null @@ -1,3 +0,0 @@ -export { TourBubble } from "./TourBubble"; -export { Welcome } from "./Welcome"; -export { Tour } from "./Tour"; diff --git a/refact-agent/gui/src/components/Trajectory/TrajectoryButton.module.css b/refact-agent/gui/src/components/Trajectory/TrajectoryButton.module.css new file mode 100644 index 000000000..10997b7e4 --- /dev/null +++ b/refact-agent/gui/src/components/Trajectory/TrajectoryButton.module.css @@ -0,0 +1,28 @@ +.iconButton { + display: flex; + align-items: center; + justify-content: center; + width: 24px; + height: 24px; + padding: 0; + border: none; + background: transparent; + color: var(--gray-11); + cursor: pointer; + transition: filter 0.15s ease; + flex-shrink: 0; +} + +.iconButton svg { + width: 15px; + height: 15px; +} + +.iconButton:hover:not(:disabled) { + filter: brightness(1.5); +} + +.iconButton:disabled { + opacity: 0.4; + cursor: not-allowed; +} diff --git a/refact-agent/gui/src/components/Trajectory/TrajectoryButton.test.tsx b/refact-agent/gui/src/components/Trajectory/TrajectoryButton.test.tsx new file mode 100644 index 000000000..8627ba7e3 --- /dev/null +++ b/refact-agent/gui/src/components/Trajectory/TrajectoryButton.test.tsx @@ -0,0 +1,17 @@ +import { describe, it, expect } from "vitest"; +import { render, screen } from "../../utils/test-utils"; +import { TrajectoryButton } from "./TrajectoryButton"; + +describe("TrajectoryButton", () => { + it("renders the trajectory button", () => { + render(<TrajectoryButton />); + const button = screen.getByTestId("trajectory-button"); + expect(button).toBeInTheDocument(); + }); + + it("has correct aria-label", () => { + render(<TrajectoryButton />); + const button = screen.getByLabelText("Compress or Handoff"); + expect(button).toBeInTheDocument(); + }); +}); diff --git a/refact-agent/gui/src/components/Trajectory/TrajectoryButton.tsx b/refact-agent/gui/src/components/Trajectory/TrajectoryButton.tsx new file mode 100644 index 000000000..23b3aab01 --- /dev/null +++ b/refact-agent/gui/src/components/Trajectory/TrajectoryButton.tsx @@ -0,0 +1,51 @@ +import React, { useState } from "react"; +import { HoverCard, Popover, Text } from "@radix-ui/themes"; +import { ArchiveIcon } from "@radix-ui/react-icons"; +import { TrajectoryPopoverContent } from "./TrajectoryPopover"; +import styles from "./TrajectoryButton.module.css"; + +type TrajectoryButtonProps = { + forceOpen?: boolean; + onOpenChange?: (open: boolean) => void; +}; + +export const TrajectoryButton: React.FC<TrajectoryButtonProps> = ({ + forceOpen, + onOpenChange, +}) => { + const [internalOpen, setInternalOpen] = useState(false); + const isControlled = forceOpen !== undefined; + const open = isControlled ? 
forceOpen : internalOpen; + + const handleOpenChange = (newOpen: boolean) => { + if (!isControlled) { + setInternalOpen(newOpen); + } + onOpenChange?.(newOpen); + }; + + return ( + <Popover.Root open={open} onOpenChange={handleOpenChange}> + <HoverCard.Root openDelay={300}> + <HoverCard.Trigger> + <Popover.Trigger> + <button + type="button" + className={styles.iconButton} + data-testid="trajectory-button" + aria-label="Compress or Handoff" + > + <ArchiveIcon /> + </button> + </Popover.Trigger> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="bottom"> + <Text as="p" size="2"> + Compress or Handoff + </Text> + </HoverCard.Content> + </HoverCard.Root> + <TrajectoryPopoverContent onClose={() => handleOpenChange(false)} /> + </Popover.Root> + ); +}; diff --git a/refact-agent/gui/src/components/Trajectory/TrajectoryPopover.module.css b/refact-agent/gui/src/components/Trajectory/TrajectoryPopover.module.css new file mode 100644 index 000000000..542a26f4a --- /dev/null +++ b/refact-agent/gui/src/components/Trajectory/TrajectoryPopover.module.css @@ -0,0 +1,60 @@ +.popoverContent { + min-width: 320px; + max-width: 400px; + /* Force GPU compositing to fix JCEF repaint issues in JetBrains IDEs */ + transform: translateZ(0); + will-change: transform; +} + +.tabsList { + width: 100%; + margin-bottom: 12px; +} + +.tabsTrigger { + flex: 1; +} + +.optionsSection { + display: flex; + flex-direction: column; + gap: 12px; + margin-bottom: 16px; +} + +.previewSection { + background: var(--gray-a2); + border-radius: 4px; + padding: 12px; + margin-bottom: 12px; +} + +.previewStats { + display: flex; + justify-content: space-between; + margin-bottom: 8px; +} + +.actionsList { + margin: 0; + padding-left: 16px; +} + +.actionsListItem { + font-size: var(--font-size-1); + color: var(--gray-11); +} + +.buttonRow { + display: flex; + gap: 8px; + justify-content: flex-end; +} + +.errorCallout { + background: var(--red-a2); + border: 1px solid var(--red-a6); + border-radius: 4px; + padding: 8px; + margin-bottom: 12px; +} diff --git a/refact-agent/gui/src/components/Trajectory/TrajectoryPopover.tsx b/refact-agent/gui/src/components/Trajectory/TrajectoryPopover.tsx new file mode 100644 index 000000000..4b011ea0f --- /dev/null +++ b/refact-agent/gui/src/components/Trajectory/TrajectoryPopover.tsx @@ -0,0 +1,329 @@ +import React from "react"; +import { + Box, + Button, + Checkbox, + Flex, + Popover, + Spinner, + Tabs, + Text, +} from "@radix-ui/themes"; +import { useTrajectoryOps } from "../../hooks/useTrajectoryOps"; +import styles from "./TrajectoryPopover.module.css"; + +type TrajectoryPopoverContentProps = { + onClose: () => void; +}; + +export const TrajectoryPopoverContent: React.FC< + TrajectoryPopoverContentProps +> = ({ onClose }) => { + const { + activeTab, + setActiveTab, + transformOptions, + handoffOptions, + transformPreview, + handoffPreview, + isPreviewingTransform, + isApplyingTransform, + isPreviewingHandoff, + isApplyingHandoff, + handlePreviewTransform, + handleApplyTransform, + handlePreviewHandoff, + handleApplyHandoff, + clearPreviews, + updateTransformOption, + updateHandoffOption, + } = useTrajectoryOps(); + + const handleTabChange = (value: string) => { + setActiveTab(value as "compress" | "handoff"); + clearPreviews(); + }; + + const handleApplyTransformClick = async () => { + const success = await handleApplyTransform(); + if (success) { + onClose(); + } + }; + + const handleApplyHandoffClick = async () => { + const success = await handleApplyHandoff(); + if (success) { + onClose(); + 
} + }; + + return ( + <Popover.Content + side="bottom" + align="end" + sideOffset={8} + className={styles.popoverContent} + > + <Tabs.Root value={activeTab} onValueChange={handleTabChange}> + <Tabs.List className={styles.tabsList}> + <Tabs.Trigger value="compress" className={styles.tabsTrigger}> + Compress in-place + </Tabs.Trigger> + <Tabs.Trigger value="handoff" className={styles.tabsTrigger}> + Handoff + </Tabs.Trigger> + </Tabs.List> + + <Tabs.Content value="compress"> + <div className={styles.optionsSection}> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={transformOptions.drop_all_context} + onCheckedChange={(checked) => { + const enabled = checked === true; + updateTransformOption("drop_all_context", enabled); + if (enabled) { + updateTransformOption( + "dedup_and_compress_context", + false, + ); + } + }} + /> + Drop all context files + </Flex> + </Text> + <Text + as="label" + size="2" + color={transformOptions.drop_all_context ? "gray" : undefined} + style={{ marginLeft: "24px" }} + > + <Flex gap="2" align="center"> + <Checkbox + checked={transformOptions.dedup_and_compress_context} + disabled={transformOptions.drop_all_context} + onCheckedChange={(checked) => + updateTransformOption( + "dedup_and_compress_context", + checked === true, + ) + } + /> + Deduplicate context files + </Flex> + </Text> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={transformOptions.compress_non_agentic_tools} + onCheckedChange={(checked) => + updateTransformOption( + "compress_non_agentic_tools", + checked === true, + ) + } + /> + Truncate tool results + </Flex> + </Text> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={transformOptions.drop_all_memories} + onCheckedChange={(checked) => + updateTransformOption("drop_all_memories", checked === true) + } + /> + Drop all memories + </Flex> + </Text> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={transformOptions.drop_project_information} + onCheckedChange={(checked) => + updateTransformOption( + "drop_project_information", + checked === true, + ) + } + /> + Drop project information + </Flex> + </Text> + </div> + + {transformPreview && ( + <Box className={styles.previewSection}> + <Text size="2" weight="medium"> + ~ + {transformPreview.stats.before_approx_tokens > 0 + ? Math.round( + ((transformPreview.stats.before_approx_tokens - + transformPreview.stats.after_approx_tokens) / + transformPreview.stats.before_approx_tokens) * + 100, + ) + : 0} + % reduction (approximate) + </Text> + {transformPreview.actions.length > 0 && ( + <ul className={styles.actionsList}> + {transformPreview.actions.map((action, idx) => ( + <li key={idx} className={styles.actionsListItem}> + {action} + </li> + ))} + </ul> + )} + </Box> + )} + + <Flex className={styles.buttonRow}> + <Button + variant="soft" + onClick={() => { + void handlePreviewTransform(); + }} + disabled={isPreviewingTransform} + > + {isPreviewingTransform ? <Spinner size="1" /> : "Preview"} + </Button> + <Button + onClick={() => { + void handleApplyTransformClick(); + }} + disabled={!transformPreview || isApplyingTransform} + > + {isApplyingTransform ? 
<Spinner size="1" /> : "Apply"} + </Button> + </Flex> + </Tabs.Content> + + <Tabs.Content value="handoff"> + <div className={styles.optionsSection}> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={handoffOptions.include_last_user_plus} + onCheckedChange={(checked) => + updateHandoffOption( + "include_last_user_plus", + checked === true, + ) + } + /> + Include last user message + responses + </Flex> + </Text> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={handoffOptions.include_all_opened_context} + onCheckedChange={(checked) => + updateHandoffOption( + "include_all_opened_context", + checked === true, + ) + } + /> + Include all opened files + </Flex> + </Text> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={handoffOptions.include_agentic_tools} + onCheckedChange={(checked) => + updateHandoffOption( + "include_agentic_tools", + checked === true, + ) + } + /> + Include research, subagent & planning results + </Flex> + </Text> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={handoffOptions.llm_summary_for_excluded} + onCheckedChange={(checked) => + updateHandoffOption( + "llm_summary_for_excluded", + checked === true, + ) + } + /> + Generate summary + </Flex> + </Text> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <Checkbox + checked={handoffOptions.include_all_user_assistant_only} + onCheckedChange={(checked) => + updateHandoffOption( + "include_all_user_assistant_only", + checked === true, + ) + } + /> + Include all user messages + responses + </Flex> + </Text> + </div> + + {handoffPreview && ( + <Box className={styles.previewSection}> + <Text size="2" weight="medium" mb="2"> + ~ + {handoffPreview.stats.before_approx_tokens > 0 + ? Math.round( + ((handoffPreview.stats.before_approx_tokens - + handoffPreview.stats.after_approx_tokens) / + handoffPreview.stats.before_approx_tokens) * + 100, + ) + : 0} + % reduction (approximate) + </Text> + {handoffPreview.actions.length > 0 && ( + <ul className={styles.actionsList}> + {handoffPreview.actions.map((action, idx) => ( + <li key={idx} className={styles.actionsListItem}> + {action} + </li> + ))} + </ul> + )} + </Box> + )} + + <Flex className={styles.buttonRow}> + <Button + variant="soft" + onClick={() => { + void handlePreviewHandoff(); + }} + disabled={isPreviewingHandoff} + > + {isPreviewingHandoff ? <Spinner size="1" /> : "Preview"} + </Button> + <Button + onClick={() => { + void handleApplyHandoffClick(); + }} + disabled={!handoffPreview || isApplyingHandoff} + > + {isApplyingHandoff ? 
<Spinner size="1" /> : "Create"} + </Button> + </Flex> + </Tabs.Content> + </Tabs.Root> + </Popover.Content> + ); +}; diff --git a/refact-agent/gui/src/components/Trajectory/index.ts b/refact-agent/gui/src/components/Trajectory/index.ts new file mode 100644 index 000000000..9b7c79261 --- /dev/null +++ b/refact-agent/gui/src/components/Trajectory/index.ts @@ -0,0 +1,2 @@ +export { TrajectoryButton } from "./TrajectoryButton"; +export { TrajectoryPopoverContent } from "./TrajectoryPopover"; diff --git a/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.module.css b/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.module.css new file mode 100644 index 000000000..2eb019e60 --- /dev/null +++ b/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.module.css @@ -0,0 +1,23 @@ +.inlineContainer { + font-variant-numeric: tabular-nums; + color: var(--gray-11); +} + +.tokenValue { + font-weight: 400; +} + +.animateValue { + animation: tokenPulse 0.12s ease-out; +} + +@keyframes tokenPulse { + 0% { + opacity: 1; + transform: scale(1.05); + } + 100% { + opacity: 1; + transform: scale(1); + } +} diff --git a/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.test.tsx b/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.test.tsx new file mode 100644 index 000000000..0099bf485 --- /dev/null +++ b/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.test.tsx @@ -0,0 +1,400 @@ +import { describe, it, expect } from "vitest"; +import { render, screen } from "@testing-library/react"; +import { Provider } from "react-redux"; +import { configureStore } from "@reduxjs/toolkit"; +import { StreamingTokenCounter } from "./StreamingTokenCounter"; +import { chatReducer } from "../../features/Chat/Thread/reducer"; +import { newChatAction } from "../../features/Chat/Thread/actions"; +import { AssistantMessage, UserMessage } from "../../services/refact"; + +// Helper to create a minimal store +function createTestStore(overrides: { + streaming?: boolean; + waiting?: boolean; + messages?: (UserMessage | AssistantMessage)[]; + maxTokens?: number; +}) { + const emptyState = chatReducer(undefined, { type: "@@INIT" }); + const initialState = chatReducer(emptyState, newChatAction(undefined)); + const threadId = initialState.current_thread_id; + const runtime = initialState.threads[threadId]; + + if (!runtime) { + throw new Error("Failed to create initial thread runtime"); + } + + return configureStore({ + reducer: { + chat: chatReducer, + }, + preloadedState: { + chat: { + ...initialState, + threads: { + [threadId]: { + ...runtime, + thread: { + ...runtime.thread, + messages: overrides.messages ?? [], + currentMaximumContextTokens: overrides.maxTokens ?? 8000, + }, + streaming: overrides.streaming ?? false, + waiting_for_response: overrides.waiting ?? 
false, + prevent_send: false, + snapshot_received: true, + }, + }, + }, + }, + }); +} + +describe("StreamingTokenCounter", () => { + describe("Visibility", () => { + it("should be hidden when not streaming or waiting", () => { + const store = createTestStore({ + streaming: false, + waiting: false, + }); + + const { container } = render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + expect(container.firstChild).toBeNull(); + }); + + it("should be hidden when waiting (before first assistant message)", () => { + const store = createTestStore({ + streaming: false, + waiting: true, + messages: [ + { + role: "user", + content: "Hello", + } as UserMessage, + ], + }); + + const { container } = render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Should be hidden when no assistant output yet + expect(container.firstChild).toBeNull(); + }); + + it("should show immediately when streaming starts", () => { + const store = createTestStore({ + streaming: true, + waiting: false, + messages: [ + { + role: "user", + content: "Hello", + } as UserMessage, + { + role: "assistant", + content: "H", + } as AssistantMessage, + ], + }); + + render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Should show estimated token count + expect(screen.getByText(/~\d+/)).toBeInTheDocument(); + }); + }); + + describe("Token counting", () => { + it("should be hidden when no assistant message yet", () => { + const store = createTestStore({ + streaming: false, + waiting: true, + messages: [ + { + role: "user", + content: "Test question", + } as UserMessage, + ], + }); + + const { container } = render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Hidden when no assistant output + expect(container.firstChild).toBeNull(); + }); + + it("should show estimated tokens during streaming", () => { + const store = createTestStore({ + streaming: true, + waiting: false, + messages: [ + { + role: "user", + content: "Hello", + } as UserMessage, + { + role: "assistant", + content: "Hello world", // ~3 tokens (11 chars / 4) + } as AssistantMessage, + ], + }); + + render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Should show "~3" (estimated) + expect(screen.getByText(/~3/)).toBeInTheDocument(); + }); + + it("should show actual tokens when usage data available", () => { + const store = createTestStore({ + streaming: true, + waiting: false, + messages: [ + { + role: "user", + content: "Hello", + } as UserMessage, + { + role: "assistant", + content: "Hello world", + usage: { + completion_tokens: 5, + prompt_tokens: 10, + total_tokens: 15, + }, + } as AssistantMessage, + ], + }); + + render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Should show "5" (actual, no ~) + expect(screen.getByText("5")).toBeInTheDocument(); + expect(screen.queryByText(/~/)).not.toBeInTheDocument(); + }); + }); + + describe("Context percentage", () => { + it("should be hidden when waiting for new assistant after previous turn", () => { + const store = createTestStore({ + streaming: false, + waiting: true, + maxTokens: 8000, + messages: [ + { + role: "user", + content: "First question", + } as UserMessage, + { + role: "assistant", + content: "First answer", + usage: { + completion_tokens: 5, + prompt_tokens: 1000, + total_tokens: 1005, + }, + } as AssistantMessage, + { + role: "user", + content: "Second question", + } as UserMessage, + // No assistant yet - 
waiting for response + ], + }); + + const { container } = render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Hidden when waiting for new assistant (no current output) + expect(container.firstChild).toBeNull(); + }); + + it("should show actual tokens when assistant message has usage", () => { + const store = createTestStore({ + streaming: true, + waiting: false, + maxTokens: 8000, + messages: [ + { + role: "user", + content: "Hello", + } as UserMessage, + { + role: "assistant", + content: "Hello world", + usage: { + completion_tokens: 5, + prompt_tokens: 2000, + total_tokens: 2005, + }, + } as AssistantMessage, + ], + }); + + render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Should show actual completion tokens (no ~ prefix) + expect(screen.getByText("5")).toBeInTheDocument(); + }); + + it("should show actual tokens at high usage", () => { + const store = createTestStore({ + streaming: true, + waiting: false, + maxTokens: 8000, + messages: [ + { + role: "user", + content: "Hello", + } as UserMessage, + { + role: "assistant", + content: "Response", + usage: { + completion_tokens: 100, + prompt_tokens: 5600, + total_tokens: 5700, + }, + } as AssistantMessage, + ], + }); + + render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Should show actual completion tokens + expect(screen.getByText("100")).toBeInTheDocument(); + }); + + it("should show actual tokens at very high usage", () => { + const store = createTestStore({ + streaming: true, + waiting: false, + maxTokens: 8000, + messages: [ + { + role: "user", + content: "Hello", + } as UserMessage, + { + role: "assistant", + content: "Response", + usage: { + completion_tokens: 200, + prompt_tokens: 7200, + total_tokens: 7400, + }, + } as AssistantMessage, + ], + }); + + render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Should show actual completion tokens + expect(screen.getByText("200")).toBeInTheDocument(); + }); + }); + + describe("Turn detection", () => { + it("should be hidden when waiting for NEW assistant (user after assistant)", () => { + const store = createTestStore({ + streaming: false, + waiting: true, + messages: [ + { + role: "user", + content: "First", + } as UserMessage, + { + role: "assistant", + content: "First response", + usage: { + completion_tokens: 5, + prompt_tokens: 1000, + total_tokens: 1005, + }, + } as AssistantMessage, + { + role: "user", + content: "Second", + } as UserMessage, + // Waiting for new assistant + ], + }); + + const { container } = render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Hidden when waiting for new assistant (no current output) + expect(container.firstChild).toBeNull(); + }); + + it("should use current assistant when continuing same turn", () => { + const store = createTestStore({ + streaming: true, + waiting: false, + messages: [ + { + role: "user", + content: "Question", + } as UserMessage, + { + role: "assistant", + content: "Streaming response...", + } as AssistantMessage, + // Still streaming same assistant message + ], + }); + + render( + <Provider store={store}> + <StreamingTokenCounter /> + </Provider>, + ); + + // Should show estimated tokens from current assistant + expect(screen.getByText(/~\d+/)).toBeInTheDocument(); + expect(screen.queryByText("…")).not.toBeInTheDocument(); + }); + }); +}); diff --git a/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.tsx 
b/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.tsx new file mode 100644 index 000000000..d5b99b87a --- /dev/null +++ b/refact-agent/gui/src/components/UsageCounter/StreamingTokenCounter.tsx @@ -0,0 +1,169 @@ +import React, { useMemo, useEffect, useRef, useState } from "react"; +import { Flex, Text } from "@radix-ui/themes"; +import { ArrowDownIcon } from "@radix-ui/react-icons"; +import classNames from "classnames"; + +import { useAppSelector } from "../../hooks"; +import { + selectIsStreaming, + selectIsWaiting, + selectMessages, +} from "../../features/Chat"; +import { + AssistantMessage, + isAssistantMessage, + isUserMessage, +} from "../../services/refact"; +import { formatNumberToFixed } from "../../utils/formatNumberToFixed"; + +import styles from "./StreamingTokenCounter.module.css"; + +function estimateTokensFromLength(length: number): number { + if (length <= 0) return 0; + return Math.ceil(length / 4); +} + +function findLastIndex<T>(arr: T[], pred: (x: T) => boolean): number { + for (let i = arr.length - 1; i >= 0; i--) { + if (pred(arr[i])) return i; + } + return -1; +} + +function getTextLength(message: AssistantMessage | null): number { + if (!message) return 0; + + let len = message.content?.length ?? 0; + + if (message.reasoning_content) { + len += message.reasoning_content.length; + } + + if (message.thinking_blocks) { + for (const block of message.thinking_blocks) { + if (block.thinking) len += block.thinking.length; + if (block.signature) len += block.signature.length; + } + } + + return len; +} + +export const StreamingTokenCounter: React.FC = () => { + const isStreaming = useAppSelector(selectIsStreaming); + const isWaiting = useAppSelector(selectIsWaiting); + const messages = useAppSelector(selectMessages); + + const [visible, setVisible] = useState(() => isStreaming || isWaiting); + const [displayTokens, setDisplayTokens] = useState(0); + const [pulseKey, setPulseKey] = useState(0); + const prevTokensRef = useRef(0); + const hideTimerRef = useRef<number | null>(null); + + const lastAssistantIdx = useMemo( + () => findLastIndex(messages, isAssistantMessage), + [messages], + ); + const lastUserIdx = useMemo( + () => findLastIndex(messages, isUserMessage), + [messages], + ); + + const waitingForNewAssistant = + (isWaiting || isStreaming) && lastUserIdx > lastAssistantIdx; + + const activeAssistantMessage = useMemo((): AssistantMessage | null => { + if (waitingForNewAssistant) return null; + if (lastAssistantIdx < 0) return null; + const msg = messages[lastAssistantIdx]; + return isAssistantMessage(msg) ? msg : null; + }, [messages, lastAssistantIdx, waitingForNewAssistant]); + + const usage = activeAssistantMessage?.usage; + + const textLength = useMemo( + (): number => getTextLength(activeAssistantMessage), + [activeAssistantMessage], + ); + + const actualOutputTokens = usage?.completion_tokens ?? 0; + const estimatedOutputTokens = useMemo((): number => { + return estimateTokensFromLength(textLength); + }, [textLength]); + + const outputTokens: number = + actualOutputTokens > 0 ? actualOutputTokens : estimatedOutputTokens; + + const hasAnyOutput = textLength > 0 || outputTokens > 0; + const hasFinalUsage = + (usage?.prompt_tokens ?? 0) > 0 || (usage?.completion_tokens ?? 
0) > 0; + + useEffect(() => { + if (hideTimerRef.current) { + window.clearTimeout(hideTimerRef.current); + hideTimerRef.current = null; + } + + if (isStreaming || isWaiting) { + setVisible(true); + } else if (hasAnyOutput && !hasFinalUsage) { + setVisible(true); + hideTimerRef.current = window.setTimeout(() => setVisible(false), 60_000); + } else if (hasFinalUsage) { + setVisible(true); + hideTimerRef.current = window.setTimeout(() => setVisible(false), 2_000); + } else { + setVisible(false); + } + + return () => { + if (hideTimerRef.current) { + window.clearTimeout(hideTimerRef.current); + hideTimerRef.current = null; + } + }; + }, [isStreaming, isWaiting, hasAnyOutput, hasFinalUsage]); + + useEffect(() => { + if (outputTokens !== prevTokensRef.current) { + prevTokensRef.current = outputTokens; + setDisplayTokens(outputTokens); + setPulseKey((k: number) => k + 1); + } + }, [outputTokens]); + + useEffect(() => { + if (!visible) { + setDisplayTokens(0); + prevTokensRef.current = 0; + setPulseKey(0); + } + }, [visible]); + + if (!visible) return null; + + const hasNoOutput = textLength === 0 && outputTokens === 0; + if (hasNoOutput) return null; + + const isOutputEstimate = actualOutputTokens === 0; + + const tokensToDisplay = + isStreaming || isWaiting ? outputTokens : displayTokens; + + return ( + <Flex align="center" gap="1" className={styles.inlineContainer}> + <Text + key={pulseKey} + size="1" + color="gray" + className={classNames(styles.tokenValue, { + [styles.animateValue]: tokensToDisplay > 0, + })} + > + {isOutputEstimate ? "~" : ""} + {formatNumberToFixed(tokensToDisplay)} + </Text> + <ArrowDownIcon width={12} height={12} /> + </Flex> + ); +}; diff --git a/refact-agent/gui/src/components/UsageCounter/TokensMapContent.module.css b/refact-agent/gui/src/components/UsageCounter/TokensMapContent.module.css new file mode 100644 index 000000000..4f9e1abf9 --- /dev/null +++ b/refact-agent/gui/src/components/UsageCounter/TokensMapContent.module.css @@ -0,0 +1,53 @@ +.container { + min-width: 280px; +} + +.segmentBar { + width: 100%; + height: 12px; + border-radius: var(--radius-2); + overflow: hidden; + background-color: var(--gray-4); +} + +.segment { + height: 100%; + transition: width 0.2s ease; +} + +.segment:first-child { + border-radius: var(--radius-2) 0 0 var(--radius-2); +} + +.segment:last-child { + border-radius: 0 var(--radius-2) var(--radius-2) 0; +} + +.segment:only-child { + border-radius: var(--radius-2); +} + +.categoryRow { + padding: 2px 0; +} + +.colorDot { + width: 8px; + height: 8px; + border-radius: 50%; + flex-shrink: 0; +} + +.percentage { + min-width: 50px; + text-align: right; +} + +.itemLabel { + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; + max-width: 180px; + direction: rtl; + text-align: left; +} diff --git a/refact-agent/gui/src/components/UsageCounter/TokensMapContent.tsx b/refact-agent/gui/src/components/UsageCounter/TokensMapContent.tsx new file mode 100644 index 000000000..7e1c71c03 --- /dev/null +++ b/refact-agent/gui/src/components/UsageCounter/TokensMapContent.tsx @@ -0,0 +1,215 @@ +import { Box, Flex, Text, ScrollArea, HoverCard } from "@radix-ui/themes"; +import { InfoCircledIcon } from "@radix-ui/react-icons"; +import React, { useMemo } from "react"; +import type { TokenMap, TokenMapSegment } from "../../services/refact/chat"; +import { formatNumberToFixed } from "../../utils/formatNumberToFixed"; +import styles from "./TokensMapContent.module.css"; + +const CATEGORY_COLORS: Record<string, string> = { + system: 
"var(--blue-9)", + project_context: "var(--indigo-9)", + memories: "var(--violet-9)", + tools: "var(--purple-9)", + context_files: "var(--green-9)", + user_messages: "var(--orange-9)", + assistant_messages: "var(--cyan-9)", + tool_results: "var(--pink-9)", + free: "var(--gray-6)", +}; + +type SegmentBarProps = { + segments: TokenMapSegment[]; + maxTokens: number; +}; + +const SegmentBar: React.FC<SegmentBarProps> = ({ segments, maxTokens }) => { + return ( + <Flex className={styles.segmentBar}> + {segments.map((segment, index) => { + const width = maxTokens > 0 ? (segment.tokens / maxTokens) * 100 : 0; + if (width < 0.5) return null; + return ( + <Box + key={index} + className={styles.segment} + style={{ + width: `${Math.max(width, 1)}%`, + backgroundColor: + CATEGORY_COLORS[segment.category] || "var(--gray-7)", + }} + title={`${segment.label}: ${formatNumberToFixed( + segment.tokens, + )} tokens (${segment.percentage.toFixed(1)}%)`} + /> + ); + })} + </Flex> + ); +}; + +type CategoryRowProps = { + segment: TokenMapSegment; +}; + +const CategoryRow: React.FC<CategoryRowProps> = ({ segment }) => { + return ( + <Flex + align="center" + justify="between" + gap="2" + className={styles.categoryRow} + > + <Flex align="center" gap="2"> + <Box + className={styles.colorDot} + style={{ + backgroundColor: + CATEGORY_COLORS[segment.category] || "var(--gray-7)", + }} + /> + <Text size="1">{segment.label}</Text> + </Flex> + <Flex align="center" gap="2"> + <Text size="1" color="gray"> + {formatNumberToFixed(segment.tokens)} + </Text> + <Text size="1" color="gray" className={styles.percentage}> + ({segment.percentage.toFixed(1)}%) + </Text> + </Flex> + </Flex> + ); +}; + +type TokensMapContentProps = { + tokenMap: TokenMap | null | undefined; +}; + +export const TokensMapContent: React.FC<TokensMapContentProps> = ({ + tokenMap, +}) => { + const usedSegments = useMemo(() => { + if (!tokenMap) return []; + return tokenMap.segments.filter( + (s) => s.category !== "free" && s.tokens > 0, + ); + }, [tokenMap]); + + const freeSegment = useMemo(() => { + if (!tokenMap) return null; + return tokenMap.segments.find((s) => s.category === "free"); + }, [tokenMap]); + + const topItems = useMemo(() => { + if (!tokenMap) return []; + return tokenMap.top_items.slice(0, 5); + }, [tokenMap]); + + if (!tokenMap) { + return ( + <Flex direction="column" align="center" justify="center" p="3"> + <Text size="1" color="gray"> + Token breakdown not available yet + </Text> + <Text size="1" color="gray"> + Send a message to see breakdown + </Text> + </Flex> + ); + } + + const usedPercentage = + tokenMap.max_context_tokens > 0 + ? ( + (tokenMap.total_prompt_tokens / tokenMap.max_context_tokens) * + 100 + ).toFixed(1) + : "0"; + + return ( + <Flex direction="column" gap="2" p="1" className={styles.container}> + <Flex align="center" justify="between" width="100%"> + <Flex align="center" gap="1"> + <Text size="2" weight="bold"> + Token breakdown + </Text> + <HoverCard.Root> + <HoverCard.Trigger> + <InfoCircledIcon + color="var(--gray-9)" + style={{ cursor: "help" }} + /> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top" style={{ maxWidth: 280 }}> + <Text as="p" size="1" color="gray"> + Total tokens are accurate (from LLM provider). + <br /> + <br /> + Category breakdown is estimated: we track token deltas between + assistant responses and distribute them proportionally by + message content length. 
+ </Text> + </HoverCard.Content> + </HoverCard.Root> + </Flex> + <Text size="1" color="gray"> + {usedPercentage}% used + </Text> + </Flex> + + <SegmentBar + segments={tokenMap.segments} + maxTokens={tokenMap.max_context_tokens} + /> + + <Box my="1" style={{ borderTop: "1px solid var(--gray-a6)" }} /> + + <ScrollArea style={{ maxHeight: "200px" }}> + <Flex direction="column" gap="1"> + {usedSegments.map((segment, index) => ( + <CategoryRow key={index} segment={segment} /> + ))} + {freeSegment && freeSegment.tokens > 0 && ( + <CategoryRow segment={freeSegment} /> + )} + </Flex> + + {topItems.length > 0 && ( + <> + <Box my="2" style={{ borderTop: "1px solid var(--gray-a6)" }} /> + <Text size="1" weight="bold" color="gray" mb="1"> + Top contributors + </Text> + <Flex direction="column" gap="1"> + {topItems.map((item, index) => ( + <Flex key={index} align="center" justify="between" gap="2"> + <Text size="1" color="gray" className={styles.itemLabel}> + {item.label} + </Text> + <Text size="1" color="gray"> + {formatNumberToFixed(item.tokens)} + </Text> + </Flex> + ))} + </Flex> + </> + )} + </ScrollArea> + + <Flex + align="center" + justify="between" + pt="1" + style={{ borderTop: "1px solid var(--gray-a6)" }} + > + <Text size="1" color="gray"> + Total / Max + </Text> + <Text size="1"> + {formatNumberToFixed(tokenMap.total_prompt_tokens)} /{" "} + {formatNumberToFixed(tokenMap.max_context_tokens)} + </Text> + </Flex> + </Flex> + ); +}; diff --git a/refact-agent/gui/src/components/UsageCounter/UsageCounter.module.css b/refact-agent/gui/src/components/UsageCounter/UsageCounter.module.css index bccdf5a9c..b4bfd79ca 100644 --- a/refact-agent/gui/src/components/UsageCounter/UsageCounter.module.css +++ b/refact-agent/gui/src/components/UsageCounter/UsageCounter.module.css @@ -3,12 +3,48 @@ margin-right: var(--space-3); display: flex; align-items: center; - padding: var(--space-2) var(--space-3); + padding: var(--space-1) var(--space-2); gap: 8px; max-width: max-content; opacity: 0.7; } +.usageCounterBorderless { + --base-card-surface-box-shadow: none; + --base-card-surface-hover-box-shadow: none; + --base-card-surface-active-box-shadow: none; + background: transparent; + padding: 0; + margin-right: var(--space-2); +} + +.circularProgress { + transform: rotate(-90deg); +} + +.circularProgressBg { + fill: none; + stroke: var(--gray-a5); +} + +.circularProgressFill { + fill: none; + stroke: var(--accent-9); + transition: stroke-dashoffset 0.3s ease; +} + +.circularProgressFillWarning { + fill: none; + stroke: var(--yellow-9); + transition: stroke-dashoffset 0.3s ease; +} + +.circularProgressFillOverflown { + fill: none; + stroke: var(--red-9); + transition: stroke-dashoffset 0.3s ease; +} + .usageCounterContainerInline { padding: calc(var(--space-1) * 1.5); --color-panel: transparent; @@ -17,19 +53,13 @@ } .isWarning { - --base-card-surface-box-shadow: 0 0 0 1px - color-mix(in oklab, var(--yellow-a5), var(--yellow-5) 25%); - --base-card-surface-hover-box-shadow: 0 0 0 1px - color-mix(in oklab, var(--yellow-a7), var(--yellow-7) 25%); - --base-card-surface-active-box-shadow: 0 0 0 1px - color-mix(in oklab, var(--yellow-a6), var(--yellow-6) 25%); + --base-card-surface-box-shadow: 0 0 0 1px var(--yellow-8); + --base-card-surface-hover-box-shadow: 0 0 0 1px var(--yellow-9); + --base-card-surface-active-box-shadow: 0 0 0 1px var(--yellow-8); } .isOverflown { - --base-card-surface-box-shadow: 0 0 0 1px - color-mix(in oklab, var(--red-a5), var(--red-5) 25%); - --base-card-surface-hover-box-shadow: 0 0 0 1px - 
color-mix(in oklab, var(--red-a7), var(--red-7) 25%); - --base-card-surface-active-box-shadow: 0 0 0 1px - color-mix(in oklab, var(--red-a6), var(--red-6) 25%); + --base-card-surface-box-shadow: 0 0 0 1px var(--red-8); + --base-card-surface-hover-box-shadow: 0 0 0 1px var(--red-9); + --base-card-surface-active-box-shadow: 0 0 0 1px var(--red-8); } diff --git a/refact-agent/gui/src/components/UsageCounter/UsageCounter.stories.tsx b/refact-agent/gui/src/components/UsageCounter/UsageCounter.stories.tsx index 431c0c236..c506ed69d 100644 --- a/refact-agent/gui/src/components/UsageCounter/UsageCounter.stories.tsx +++ b/refact-agent/gui/src/components/UsageCounter/UsageCounter.stories.tsx @@ -28,6 +28,7 @@ const MockedStore: React.FC<{ isInline = false, isMessageEmpty = false, }) => { + const threadId = "test"; const store = setUpStore({ config: { themeProps: { @@ -37,36 +38,52 @@ const MockedStore: React.FC<{ lspPort: 8001, }, chat: { - streaming: false, - error: null, - waiting_for_response: false, - prevent_send: false, - send_immediately: false, - tool_use: "agent", - system_prompt: {}, - cache: {}, - queued_messages: [], - thread: { - id: "test", - messages: [ - { - role: "user", - content: "Hello, how are you?", + current_thread_id: threadId, + open_thread_ids: [threadId], + threads: { + [threadId]: { + thread: { + id: threadId, + messages: [ + { + role: "user", + content: "Hello, how are you?", + }, + { + role: "assistant", + content: "Test content", + usage, + }, + ], + model: "claude-3-5-sonnet", + mode: "AGENT", + new_chat_suggested: { + wasSuggested: false, + }, + currentMaximumContextTokens: threadMaximumContextTokens, + currentMessageContextTokens, }, - { - role: "assistant", - content: "Test content", - usage, + streaming: false, + waiting_for_response: false, + prevent_send: false, + error: null, + queued_items: [], + send_immediately: false, + attached_images: [], + attached_text_files: [], + confirmation: { + pause: false, + pause_reasons: [], + status: { wasInteracted: false, confirmationStatus: true }, }, - ], - model: "claude-3-5-sonnet", - mode: "AGENT", - new_chat_suggested: { - wasSuggested: false, + snapshot_received: true, + task_widget_expanded: false, }, - currentMaximumContextTokens: threadMaximumContextTokens, - currentMessageContextTokens, }, + tool_use: "agent", + system_prompt: {}, + sse_refresh_requested: null, + stream_version: 0, }, }); diff --git a/refact-agent/gui/src/components/UsageCounter/UsageCounter.tsx b/refact-agent/gui/src/components/UsageCounter/UsageCounter.tsx index 37170107e..69d50e535 100644 --- a/refact-agent/gui/src/components/UsageCounter/UsageCounter.tsx +++ b/refact-agent/gui/src/components/UsageCounter/UsageCounter.tsx @@ -1,5 +1,12 @@ -import { ArrowDownIcon, ArrowUpIcon } from "@radix-ui/react-icons"; -import { Box, Card, Flex, HoverCard, Tabs, Text } from "@radix-ui/themes"; +import { + Card, + Flex, + HoverCard, + Text, + Box, + Tabs, + Popover, +} from "@radix-ui/themes"; import classNames from "classnames"; import React, { useMemo, useState } from "react"; @@ -7,22 +14,75 @@ import { calculateUsageInputTokens } from "../../utils/calculateUsageInputTokens import { ScrollArea } from "../ScrollArea"; import { useUsageCounter } from "./useUsageCounter"; -import { selectAllImages } from "../../features/AttachedImages"; import { selectThreadCurrentMessageTokens, - selectThreadMaximumTokens, + selectThreadImages, + selectEffectiveMaxContextTokens, } from "../../features/Chat"; +import { TokensMapContent } from "./TokensMapContent"; +import { 
useTokenMap } from "./useTokenMap"; import { formatNumberToFixed } from "../../utils/formatNumberToFixed"; import { useAppSelector, useEffectOnce, useTotalCostForChat, useTotalTokenMeteringForChat, + useTotalUsdForChat, } from "../../hooks"; +import { formatUsd } from "../../utils/getMetering"; import styles from "./UsageCounter.module.css"; import { Coin } from "../../images"; -import { CompressionStrength, Usage } from "../../services/refact"; + +type CircularProgressProps = { + value: number; + max: number; + size?: number; + strokeWidth?: number; +}; + +const CircularProgress: React.FC<CircularProgressProps> = ({ + value, + max, + size = 20, + strokeWidth = 3, +}) => { + const percentage = max > 0 ? Math.min((value / max) * 100, 100) : 0; + const radius = (size - strokeWidth) / 2; + const circumference = 2 * Math.PI * radius; + const strokeDashoffset = circumference - (percentage / 100) * circumference; + + const isWarning = percentage >= 70 && percentage < 90; + const isOverflown = percentage >= 90; + + return ( + <svg width={size} height={size} className={styles.circularProgress}> + <circle + className={styles.circularProgressBg} + cx={size / 2} + cy={size / 2} + r={radius} + strokeWidth={strokeWidth} + /> + <circle + className={ + isOverflown + ? styles.circularProgressFillOverflown + : isWarning + ? styles.circularProgressFillWarning + : styles.circularProgressFill + } + cx={size / 2} + cy={size / 2} + r={radius} + strokeWidth={strokeWidth} + strokeDasharray={circumference} + strokeDashoffset={strokeDashoffset} + strokeLinecap="round" + /> + </svg> + ); +}; type UsageCounterProps = | { @@ -46,51 +106,6 @@ const TokenDisplay: React.FC<{ label: string; value: number }> = ({ </Flex> ); -const TokensDisplay: React.FC<{ - currentThreadUsage?: Usage | null; - inputTokens: number; - outputTokens: number; -}> = ({ currentThreadUsage, inputTokens, outputTokens }) => { - if (!currentThreadUsage) return; - const { - cache_read_input_tokens, - cache_creation_input_tokens, - completion_tokens_details, - prompt_tokens, - } = currentThreadUsage; - - return ( - <Flex direction="column" align="start" gap="2"> - <Text size="2" mb="2"> - Tokens spent per chat thread: - </Text> - <TokenDisplay label="Input tokens (in total)" value={inputTokens} /> - - <TokenDisplay label="Prompt tokens" value={prompt_tokens} /> - - {cache_read_input_tokens !== undefined && ( - <TokenDisplay - label="Cache read input tokens" - value={cache_read_input_tokens} - /> - )} - {cache_creation_input_tokens !== undefined && ( - <TokenDisplay - label="Cache creation input tokens" - value={cache_creation_input_tokens} - /> - )} - <TokenDisplay label="Completion tokens" value={outputTokens} /> - {completion_tokens_details?.reasoning_tokens !== null && ( - <TokenDisplay - label="Reasoning tokens" - value={completion_tokens_details?.reasoning_tokens ?? 
0} - /> - )} - </Flex> - ); -}; - const CoinDisplay: React.FC<{ label: React.ReactNode; value: number }> = ({ label, value, @@ -109,44 +124,12 @@ const CoinDisplay: React.FC<{ label: React.ReactNode; value: number }> = ({ ); }; -const CoinsDisplay: React.FC<{ - total: number; - prompt?: number; - generated?: number; - cacheRead?: number; - cacheCreation?: number; -}> = ({ total, prompt, generated, cacheRead, cacheCreation }) => { - return ( - <Flex direction="column" align="start" gap="2"> - <Flex align="center" justify="between" width="100%" gap="4" mb="2"> - <Text size="2">Coins spent</Text> - <Text size="2"> - <Flex align="center" gap="2"> - {Math.round(total)} <Coin width="15px" height="15px" /> - </Flex> - </Text> - </Flex> - - {prompt && <CoinDisplay label="Prompt" value={prompt} />} - - {generated !== undefined && ( - <CoinDisplay label="Completion" value={generated} /> - )} - - {cacheRead !== undefined && ( - <CoinDisplay label="Prompt cache read" value={cacheRead} /> - )} - {cacheCreation !== undefined && ( - <CoinDisplay label="Prompt cache creation" value={cacheCreation} /> - )} - </Flex> - ); -}; - const InlineHoverCard: React.FC<{ messageTokens: number }> = ({ messageTokens, }) => { - const maximumThreadContextTokens = useAppSelector(selectThreadMaximumTokens); + const maximumThreadContextTokens = useAppSelector( + selectEffectiveMaxContextTokens, + ); return ( <Flex direction="column" align="start" gap="2"> @@ -165,109 +148,6 @@ const InlineHoverCard: React.FC<{ messageTokens: number }> = ({ ); }; -const DefaultHoverCard: React.FC<{ - inputTokens: number; - outputTokens: number; -}> = ({ inputTokens, outputTokens }) => { - const cost = useTotalCostForChat(); - const meteringTokens = useTotalTokenMeteringForChat(); - const { currentThreadUsage } = useUsageCounter(); - const total = useMemo(() => { - return ( - (cost?.metering_coins_prompt ?? 0) + - (cost?.metering_coins_generated ?? 0) + - (cost?.metering_coins_cache_creation ?? 0) + - (cost?.metering_coins_cache_read ?? 
0) - ); - }, [cost]); - const totalMetering = useMemo(() => { - if (meteringTokens === null) return null; - return Object.values(meteringTokens).reduce<number>( - (acc, cur) => acc + cur, - 0, - ); - }, [meteringTokens]); - - const tabsOptions = useMemo(() => { - const options = []; - if (total > 0) { - options.push({ - value: "coins", - label: "Coins", - }); - } - options.push({ - value: "tokens", - label: "Tokens", - }); - return options; - }, [total]); - - const renderContent = (optionValue: string) => { - if (optionValue === "tokens" && meteringTokens && totalMetering !== null) { - const usage: Usage = { - prompt_tokens: meteringTokens.metering_prompt_tokens_n, - total_tokens: totalMetering, - cache_creation_input_tokens: - meteringTokens.metering_cache_creation_tokens_n, - cache_read_input_tokens: meteringTokens.metering_cache_read_tokens_n, - completion_tokens: meteringTokens.metering_generated_tokens_n, - }; - return ( - <TokensDisplay - currentThreadUsage={usage} - inputTokens={ - meteringTokens.metering_prompt_tokens_n + - meteringTokens.metering_cache_read_tokens_n + - meteringTokens.metering_cache_creation_tokens_n - } - outputTokens={meteringTokens.metering_generated_tokens_n} - /> - ); - } else if (optionValue === "tokens") { - return ( - <TokensDisplay - currentThreadUsage={currentThreadUsage} - inputTokens={inputTokens} - outputTokens={outputTokens} - /> - ); - } - return ( - <CoinsDisplay - total={total} - prompt={cost?.metering_coins_prompt} - generated={cost?.metering_coins_generated} - cacheRead={cost?.metering_coins_cache_read} - cacheCreation={cost?.metering_coins_cache_creation} - /> - ); - }; - - if (tabsOptions.length === 1) { - return <Box pt="3">{renderContent(tabsOptions[0].value)}</Box>; - } - - return ( - <Tabs.Root defaultValue={tabsOptions[0].value}> - <Tabs.List size="1"> - {tabsOptions.map((option) => ( - <Tabs.Trigger value={option.value} key={option.value}> - {option.label} - </Tabs.Trigger> - ))} - </Tabs.List> - <Box pt="3"> - {tabsOptions.map((option) => ( - <Tabs.Content value={option.value} key={option.value}> - {renderContent(option.value)} - </Tabs.Content> - ))} - </Box> - </Tabs.Root> - ); -}; - const InlineHoverTriggerContent: React.FC<{ messageTokens: number }> = ({ messageTokens, }) => { @@ -281,88 +161,270 @@ const InlineHoverTriggerContent: React.FC<{ messageTokens: number }> = ({ ); }; -const formatCompressionStage = ( - strength: CompressionStrength | null | undefined, -): string | null => { - switch (strength) { - case "low": - return "1/3"; - case "medium": - return "2/3"; - case "high": - return "3/3"; - case "absent": - default: - return null; - } +const CoinsHoverContent: React.FC<{ + totalCoins: number; + prompt?: number; + generated?: number; + cacheRead?: number; + cacheCreation?: number; +}> = ({ totalCoins, prompt, generated, cacheRead, cacheCreation }) => { + return ( + <Flex direction="column" gap="2" p="1"> + <Flex align="center" justify="between" width="100%" gap="4"> + <Text size="2" weight="bold"> + Total coins + </Text> + <Text size="2"> + <Flex align="center" gap="2"> + {Math.round(totalCoins)} <Coin width="14px" height="14px" /> + </Flex> + </Text> + </Flex> + {prompt !== undefined && prompt > 0 && ( + <CoinDisplay label="Prompt" value={prompt} /> + )} + {generated !== undefined && generated > 0 && ( + <CoinDisplay label="Completion" value={generated} /> + )} + {cacheRead !== undefined && cacheRead > 0 && ( + <CoinDisplay label="Cache read" value={cacheRead} /> + )} + {cacheCreation !== undefined && cacheCreation > 0 && 
( + <CoinDisplay label="Cache creation" value={cacheCreation} /> + )} + </Flex> + ); }; -const DefaultHoverTriggerContent: React.FC<{ +const UsdDisplayRow: React.FC<{ label: string; value: number | undefined }> = ({ + label, + value, +}) => ( + <Flex align="center" justify="between" width="100%" gap="4"> + <Text size="1" weight="bold"> + {label} + </Text> + <Text size="1">{formatUsd(value)}</Text> + </Flex> +); + +const UsdHoverContent: React.FC<{ + totalUsd: number; + promptUsd?: number; + generatedUsd?: number; + cacheReadUsd?: number; + cacheCreationUsd?: number; +}> = ({ + totalUsd, + promptUsd, + generatedUsd, + cacheReadUsd, + cacheCreationUsd, +}) => { + return ( + <Flex direction="column" gap="2" p="1"> + <Flex align="center" justify="between" width="100%" gap="4"> + <Text size="2" weight="bold"> + Total cost + </Text> + <Text size="2">{formatUsd(totalUsd)}</Text> + </Flex> + {promptUsd !== undefined && promptUsd > 0 && ( + <UsdDisplayRow label="Prompt" value={promptUsd} /> + )} + {generatedUsd !== undefined && generatedUsd > 0 && ( + <UsdDisplayRow label="Completion" value={generatedUsd} /> + )} + {cacheReadUsd !== undefined && cacheReadUsd > 0 && ( + <UsdDisplayRow label="Cache read" value={cacheReadUsd} /> + )} + {cacheCreationUsd !== undefined && cacheCreationUsd > 0 && ( + <UsdDisplayRow label="Cache creation" value={cacheCreationUsd} /> + )} + </Flex> + ); +}; + +const TokensHoverContent: React.FC<{ + currentSessionTokens: number; + maxContextTokens: number; inputTokens: number; outputTokens: number; - currentSessionTokens: number; - compressionStrength?: CompressionStrength | null; - totalCoins?: number; + cacheReadTokens?: number; + cacheCreationTokens?: number; }> = ({ + currentSessionTokens, + maxContextTokens, inputTokens, outputTokens, + cacheReadTokens, + cacheCreationTokens, +}) => { + const percentage = + maxContextTokens > 0 + ? Math.round((currentSessionTokens / maxContextTokens) * 100) + : 0; + + return ( + <Flex direction="column" gap="2" p="1"> + <Flex align="center" justify="between" width="100%" gap="4"> + <Text size="2" weight="bold"> + Context usage + </Text> + <Text size="2">{percentage}%</Text> + </Flex> + <TokenDisplay label="Current" value={currentSessionTokens} /> + <TokenDisplay label="Maximum" value={maxContextTokens} /> + {(inputTokens > 0 || outputTokens > 0) && ( + <> + <Box my="1" style={{ borderTop: "1px solid var(--gray-a6)" }} /> + <Text size="1" weight="bold" color="gray"> + Total tokens + </Text> + {inputTokens > 0 && ( + <TokenDisplay label="Input" value={inputTokens} /> + )} + {(cacheReadTokens ?? 0) > 0 && ( + <TokenDisplay label="Cache read" value={cacheReadTokens ?? 0} /> + )} + {(cacheCreationTokens ?? 0) > 0 && ( + <TokenDisplay + label="Cache creation" + value={cacheCreationTokens ?? 
0} + /> + )} + {outputTokens > 0 && ( + <TokenDisplay label="Output" value={outputTokens} /> + )} + </> + )} + </Flex> + ); +}; + +const DefaultHoverTriggerContent: React.FC<{ + currentSessionTokens: number; + maxContextTokens: number; + totalCoins?: number; + totalUsd?: number; + inputTokens: number; + outputTokens: number; + cacheReadTokens?: number; + cacheCreationTokens?: number; + coinsPrompt?: number; + coinsGenerated?: number; + coinsCacheRead?: number; + coinsCacheCreation?: number; + usdPrompt?: number; + usdGenerated?: number; + usdCacheRead?: number; + usdCacheCreation?: number; + tokenMap?: import("../../services/refact/chat").TokenMap | null; +}> = ({ currentSessionTokens, - compressionStrength, + maxContextTokens, totalCoins, + totalUsd, + inputTokens, + outputTokens, + cacheReadTokens, + cacheCreationTokens, + coinsPrompt, + coinsGenerated, + coinsCacheRead, + coinsCacheCreation, + usdPrompt, + usdGenerated, + usdCacheRead, + usdCacheCreation, + tokenMap, }) => { - const compressionLabel = formatCompressionStage(compressionStrength); - const hasCoinsOrContext = - (totalCoins !== undefined && totalCoins > 0) || currentSessionTokens !== 0; - const hasInputOutput = inputTokens !== 0 || outputTokens !== 0; + const hasUsd = totalUsd !== undefined && totalUsd > 0; + const showCoins = !hasUsd && totalCoins !== undefined && totalCoins > 0; + const showUsd = hasUsd; return ( - <Flex direction="column" align="end" gap="1"> - {hasCoinsOrContext && ( - <Flex align="center" gap="2"> - {totalCoins !== undefined && totalCoins > 0 && ( - <Flex align="center" gap="1" title="Total coins spent"> + <Flex align="center" gap="3"> + {showCoins && ( + <HoverCard.Root> + <HoverCard.Trigger> + <Flex align="center" gap="1" style={{ cursor: "default" }}> <Text size="1">{Math.round(totalCoins)}</Text> <Coin width="12px" height="12px" /> </Flex> - )} - {currentSessionTokens !== 0 && ( - <Text size="1" color="gray" title="Current context window usage"> - ctx: {formatNumberToFixed(currentSessionTokens)} - </Text> - )} - {compressionLabel && ( - <Text - size="1" - color={ - compressionStrength === "high" - ? "red" - : compressionStrength === "medium" - ? 
"yellow" - : "gray" - } - title="Compression stage" - > - ⚡{compressionLabel} - </Text> - )} - </Flex> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top" align="center"> + <CoinsHoverContent + totalCoins={totalCoins} + prompt={coinsPrompt} + generated={coinsGenerated} + cacheRead={coinsCacheRead} + cacheCreation={coinsCacheCreation} + /> + </HoverCard.Content> + </HoverCard.Root> )} - {hasInputOutput && ( - <Flex align="center" gap="2" title="Total tokens: input ↑ / output ↓"> - {inputTokens !== 0 && ( - <Flex align="center"> - <ArrowUpIcon width="12" height="12" /> - <Text size="1">{formatNumberToFixed(inputTokens)}</Text> + {showUsd && ( + <HoverCard.Root> + <HoverCard.Trigger> + <Flex align="center" gap="1" style={{ cursor: "default" }}> + <Text size="1">{formatUsd(totalUsd)}</Text> </Flex> - )} - {outputTokens !== 0 && ( - <Flex align="center"> - <ArrowDownIcon width="12" height="12" /> - <Text size="1">{formatNumberToFixed(outputTokens)}</Text> - </Flex> - )} - </Flex> + </HoverCard.Trigger> + <HoverCard.Content size="1" side="top" align="center"> + <UsdHoverContent + totalUsd={totalUsd} + promptUsd={usdPrompt} + generatedUsd={usdGenerated} + cacheReadUsd={usdCacheRead} + cacheCreationUsd={usdCacheCreation} + /> + </HoverCard.Content> + </HoverCard.Root> )} + <Popover.Root> + <Popover.Trigger> + <Flex align="center" gap="1" style={{ cursor: "pointer" }}> + <CircularProgress + value={maxContextTokens > 0 ? currentSessionTokens : 0} + max={maxContextTokens > 0 ? maxContextTokens : 1} + size={18} + strokeWidth={2.5} + /> + <Text size="1" color="gray"> + {formatNumberToFixed(currentSessionTokens)} + </Text> + </Flex> + </Popover.Trigger> + <Popover.Content + size="1" + side="top" + align="center" + style={{ minWidth: "280px" }} + > + <Tabs.Root defaultValue="summary"> + <Tabs.List size="1"> + <Tabs.Trigger value="summary">Summary</Tabs.Trigger> + <Tabs.Trigger value="map">Breakdown</Tabs.Trigger> + </Tabs.List> + <Box pt="2"> + <Tabs.Content value="summary"> + <TokensHoverContent + currentSessionTokens={currentSessionTokens} + maxContextTokens={maxContextTokens} + inputTokens={inputTokens} + outputTokens={outputTokens} + cacheReadTokens={cacheReadTokens} + cacheCreationTokens={cacheCreationTokens} + /> + </Tabs.Content> + <Tabs.Content value="map"> + <TokensMapContent tokenMap={tokenMap} /> + </Tabs.Content> + </Box> + </Tabs.Root> + </Popover.Content> + </Popover.Root> </Flex> ); }; @@ -372,17 +434,14 @@ export const UsageCounter: React.FC<UsageCounterProps> = ({ isMessageEmpty, }) => { const [open, setOpen] = useState(false); - const maybeAttachedImages = useAppSelector(selectAllImages); - const { - currentThreadUsage, - isOverflown, - isWarning, - compressionStrength, - currentSessionTokens, - } = useUsageCounter(); + const maybeAttachedImages = useAppSelector(selectThreadImages); + const { currentThreadUsage, isOverflown, isWarning, currentSessionTokens } = + useUsageCounter(); const currentMessageTokens = useAppSelector(selectThreadCurrentMessageTokens); const meteringTokens = useTotalTokenMeteringForChat(); const cost = useTotalCostForChat(); + const usdCost = useTotalUsdForChat(); + const tokenMap = useTokenMap(); const totalCoins = useMemo(() => { return ( @@ -433,9 +492,28 @@ export const UsageCounter: React.FC<UsageCounterProps> = ({ return outputMeteringTokens ?? 
outputUsageTokens; }, [outputMeteringTokens, outputUsageTokens]); + const cacheReadTokens = useMemo(() => { + const meteringValue = meteringTokens?.metering_cache_read_tokens_n; + if (typeof meteringValue === "number") { + return meteringValue; + } + return currentThreadUsage?.cache_read_input_tokens ?? 0; + }, [meteringTokens, currentThreadUsage]); + + const cacheCreationTokens = useMemo(() => { + const meteringValue = meteringTokens?.metering_cache_creation_tokens_n; + if (typeof meteringValue === "number") { + return meteringValue; + } + return currentThreadUsage?.cache_creation_input_tokens ?? 0; + }, [meteringTokens, currentThreadUsage]); + + const maxContextTokens = useAppSelector(selectEffectiveMaxContextTokens) ?? 0; + const shouldUsageBeHidden = useMemo(() => { - return !isInline && inputTokens === 0 && outputTokens === 0; - }, [outputTokens, inputTokens, isInline]); + if (isInline) return false; + return false; + }, [isInline]); useEffectOnce(() => { const handleScroll = (event: WheelEvent) => { @@ -455,6 +533,44 @@ export const UsageCounter: React.FC<UsageCounterProps> = ({ if (shouldUsageBeHidden) return null; + // For non-inline (panel) usage, render borderless with individual hovercards + if (!isInline) { + return ( + <Flex + align="center" + className={classNames( + styles.usageCounterContainer, + styles.usageCounterBorderless, + { + [styles.isWarning]: isWarning, + [styles.isOverflown]: isOverflown, + }, + )} + > + <DefaultHoverTriggerContent + currentSessionTokens={currentSessionTokens} + maxContextTokens={maxContextTokens} + totalCoins={totalCoins} + totalUsd={usdCost?.total_usd} + inputTokens={inputTokens} + outputTokens={outputTokens} + cacheReadTokens={cacheReadTokens} + cacheCreationTokens={cacheCreationTokens} + coinsPrompt={cost?.metering_coins_prompt} + coinsGenerated={cost?.metering_coins_generated} + coinsCacheRead={cost?.metering_coins_cache_read} + coinsCacheCreation={cost?.metering_coins_cache_creation} + usdPrompt={usdCost?.prompt_usd} + usdGenerated={usdCost?.generated_usd} + usdCacheRead={usdCost?.cache_read_usd} + usdCacheCreation={usdCost?.cache_creation_usd} + tokenMap={tokenMap} + /> + </Flex> + ); + } + + // For inline usage (chat form), keep the HoverCard with detailed info return ( <HoverCard.Root open={open} onOpenChange={setOpen}> <HoverCard.Trigger> @@ -465,17 +581,7 @@ export const UsageCounter: React.FC<UsageCounterProps> = ({ [styles.isOverflown]: isOverflown, })} > - {isInline ? ( - <InlineHoverTriggerContent messageTokens={messageTokens} /> - ) : ( - <DefaultHoverTriggerContent - inputTokens={inputTokens} - outputTokens={outputTokens} - currentSessionTokens={currentSessionTokens} - compressionStrength={compressionStrength} - totalCoins={totalCoins} - /> - )} + <InlineHoverTriggerContent messageTokens={messageTokens} /> </Card> </HoverCard.Trigger> <ScrollArea scrollbars="both" asChild> @@ -485,18 +591,11 @@ export const UsageCounter: React.FC<UsageCounterProps> = ({ maxWidth="90vw" minWidth="300px" avoidCollisions - align={isInline ? "center" : "end"} + align="center" side="top" hideWhenDetached > - {isInline ? 
(
-            <InlineHoverCard messageTokens={messageTokens} />
-          ) : (
-            <DefaultHoverCard
-              inputTokens={inputTokens}
-              outputTokens={outputTokens}
-            />
-          )}
+          <InlineHoverCard messageTokens={messageTokens} />
         </HoverCard.Content>
       </ScrollArea>
     </HoverCard.Root>
diff --git a/refact-agent/gui/src/components/UsageCounter/index.ts b/refact-agent/gui/src/components/UsageCounter/index.ts
index 62f85dc94..5fefa9d40 100644
--- a/refact-agent/gui/src/components/UsageCounter/index.ts
+++ b/refact-agent/gui/src/components/UsageCounter/index.ts
@@ -1 +1,2 @@
 export { UsageCounter } from "./UsageCounter";
+export { StreamingTokenCounter } from "./StreamingTokenCounter";
diff --git a/refact-agent/gui/src/components/UsageCounter/useTokenMap.ts b/refact-agent/gui/src/components/UsageCounter/useTokenMap.ts
new file mode 100644
index 000000000..94939dc9b
--- /dev/null
+++ b/refact-agent/gui/src/components/UsageCounter/useTokenMap.ts
@@ -0,0 +1,286 @@
+import { useMemo } from "react";
+import { useAppSelector } from "../../hooks";
+import {
+  selectMessages,
+  selectEffectiveMaxContextTokens,
+} from "../../features/Chat";
+import {
+  isAssistantMessage,
+  isUserMessage,
+  isSystemMessage,
+  isChatContextFileMessage,
+  isToolMessage,
+  isDiffMessage,
+  ChatMessage,
+} from "../../services/refact/types";
+import type {
+  TokenMap,
+  TokenMapSegment,
+  TokenMapItem,
+  Usage,
+} from "../../services/refact/chat";
+
+function getTotalInputTokens(usage: Usage | null | undefined): number {
+  if (!usage) return 0;
+  return (
+    usage.prompt_tokens +
+    (usage.cache_creation_input_tokens ?? 0) +
+    (usage.cache_read_input_tokens ?? 0)
+  );
+}
+
+const PROJECT_CONTEXT_MARKER = "project_context";
+const MEMORIES_CONTEXT_MARKER = "memories_context";
+const TASK_MEMORIES_CONTEXT_MARKER = "task_memories_context";
+
+type Category =
+  | "system"
+  | "project_context"
+  | "memories"
+  | "context_files"
+  | "user_messages"
+  | "assistant_messages"
+  | "tool_results";
+
+function getMessageTextLength(message: ChatMessage): number {
+  const content = message.content;
+  if (typeof content === "string") {
+    return content.length;
+  }
+  if (Array.isArray(content)) {
+    return content.reduce((acc: number, item: unknown) => {
+      if (typeof item === "string") return acc + item.length;
+      if (item && typeof item === "object") {
+        if ("text" in item) {
+          return acc + String((item as { text?: string }).text ?? "").length;
+        }
+        if ("file_content" in item) {
+          return (
+            acc +
+            String((item as { file_content?: string }).file_content ?? "")
+              .length
+          );
+        }
+        if (
+          "type" in item &&
+          (item as { type?: string }).type === "image_url"
+        ) {
+          return acc;
+        }
+      }
+      return acc + 100;
+    }, 0);
+  }
+  return JSON.stringify(content).length;
+}
+
+function getMessageCategory(msg: ChatMessage): Category {
+  if (isSystemMessage(msg)) {
+    return "system";
+  }
+  if (isChatContextFileMessage(msg)) {
+    const toolCallId = msg.tool_call_id;
+    if (toolCallId === PROJECT_CONTEXT_MARKER) {
+      return "project_context";
+    }
+    if (
+      toolCallId === MEMORIES_CONTEXT_MARKER ||
+      toolCallId === TASK_MEMORIES_CONTEXT_MARKER
+    ) {
+      return "memories";
+    }
+    return "context_files";
+  }
+  if (isUserMessage(msg)) {
+    return "user_messages";
+  }
+  if (isAssistantMessage(msg)) {
+    return "assistant_messages";
+  }
+  if (isToolMessage(msg) || isDiffMessage(msg)) {
+    return "tool_results";
+  }
+  return "system";
+}
+
+function getAssistantMessageLength(msg: ChatMessage): number {
+  let len = getMessageTextLength(msg);
+  if (isAssistantMessage(msg) && msg.tool_calls) {
+    len += JSON.stringify(msg.tool_calls).length;
+  }
+  return len;
+}
+
+type CategoryTokens = Record<Category, number>;
+
+function createEmptyCategoryTokens(): CategoryTokens {
+  return {
+    system: 0,
+    project_context: 0,
+    memories: 0,
+    context_files: 0,
+    user_messages: 0,
+    assistant_messages: 0,
+    tool_results: 0,
+  };
+}
+
+export function useTokenMap(enabled = true): TokenMap | null {
+  const messages = useAppSelector(selectMessages);
+  const maxContextTokens = useAppSelector(selectEffectiveMaxContextTokens) ?? 0;
+
+  return useMemo(() => {
+    if (!enabled) return null;
+    if (messages.length === 0) return null;
+
+    const assistantIndices: number[] = [];
+    for (let i = 0; i < messages.length; i++) {
+      const msg = messages[i];
+      if (isAssistantMessage(msg) && getTotalInputTokens(msg.usage) > 0) {
+        assistantIndices.push(i);
+      }
+    }
+
+    if (assistantIndices.length === 0) return null;
+
+    const categoryTokens = createEmptyCategoryTokens();
+    const contextFileItems: {
+      label: string;
+      tokens: number;
+      category: string;
+    }[] = [];
+
+    let prevPromptTokens = 0;
+    let prevEndIndex = -1;
+
+    for (const assistantIndex of assistantIndices) {
+      const assistantMsg = messages[assistantIndex];
+      const currentPromptTokens = isAssistantMessage(assistantMsg)
+        ? getTotalInputTokens(assistantMsg.usage)
+        : 0;
+      if (currentPromptTokens === 0) continue;
+      const deltaTokens = currentPromptTokens - prevPromptTokens;
+
+      const segmentMessages: ChatMessage[] = [];
+      for (let i = prevEndIndex + 1; i <= assistantIndex; i++) {
+        segmentMessages.push(messages[i]);
+      }
+
+      const segmentLengths = createEmptyCategoryTokens();
+      const segmentContextFiles: {
+        label: string;
+        length: number;
+        category: string;
+      }[] = [];
+
+      for (const msg of segmentMessages) {
+        const category = getMessageCategory(msg);
+        const len =
+          category === "assistant_messages"
+            ? getAssistantMessageLength(msg)
+            : getMessageTextLength(msg);
+
+        segmentLengths[category] += len;
+
+        if (isChatContextFileMessage(msg)) {
+          for (const file of msg.content) {
+            segmentContextFiles.push({
+              label: file.file_name,
+              length: file.file_content.length,
+              category,
+            });
+          }
+        }
+      }
+
+      const totalSegmentLength = Object.values(segmentLengths).reduce(
+        (a, b) => a + b,
+        0,
+      );
+
+      if (totalSegmentLength > 0 && deltaTokens > 0) {
+        const scale = deltaTokens / totalSegmentLength;
+
+        for (const cat of Object.keys(segmentLengths) as Category[]) {
+          categoryTokens[cat] += Math.round(segmentLengths[cat] * scale);
+        }
+
+        for (const item of segmentContextFiles) {
+          contextFileItems.push({
+            label: item.label,
+            tokens: Math.round(item.length * scale),
+            category: item.category,
+          });
+        }
+      }
+
+      prevPromptTokens = currentPromptTokens;
+      prevEndIndex = assistantIndex;
+    }
+
+    const lastAssistantIndex = assistantIndices[assistantIndices.length - 1];
+    const lastAssistantMsg = messages[lastAssistantIndex];
+    const totalPromptTokens = isAssistantMessage(lastAssistantMsg)
+      ? getTotalInputTokens(lastAssistantMsg.usage)
+      : 0;
+    if (totalPromptTokens === 0) return null;
+
+    const totalUsedTokens = Object.values(categoryTokens).reduce(
+      (a, b) => a + b,
+      0,
+    );
+    const freeTokens = Math.max(0, maxContextTokens - totalUsedTokens);
+
+    const calcPercentage = (tokens: number) =>
+      maxContextTokens > 0 ? (tokens / maxContextTokens) * 100 : 0;
+
+    const segments: TokenMapSegment[] = [];
+
+    const categoryConfig: { key: Category; label: string }[] = [
+      { key: "system", label: "System prompt" },
+      { key: "project_context", label: "Project context" },
+      { key: "memories", label: "Memories" },
+      { key: "context_files", label: "Context files" },
+      { key: "user_messages", label: "User messages" },
+      { key: "assistant_messages", label: "Assistant messages" },
+      { key: "tool_results", label: "Tool results" },
+    ];
+
+    for (const { key, label } of categoryConfig) {
+      if (categoryTokens[key] > 0) {
+        segments.push({
+          label,
+          category: key,
+          tokens: categoryTokens[key],
+          percentage: calcPercentage(categoryTokens[key]),
+        });
+      }
+    }
+
+    if (freeTokens > 0) {
+      segments.push({
+        label: "Free space",
+        category: "free",
+        tokens: freeTokens,
+        percentage: calcPercentage(freeTokens),
+      });
+    }
+
+    const top_items: TokenMapItem[] = contextFileItems
+      .sort((a, b) => b.tokens - a.tokens)
+      .slice(0, 5)
+      .map((item) => ({
+        category: item.category,
+        label: item.label,
+        tokens: item.tokens,
+      }));
+
+    return {
+      total_prompt_tokens: totalPromptTokens,
+      max_context_tokens: maxContextTokens,
+      estimated: false,
+      segments,
+      top_items,
+    };
+  }, [enabled, messages, maxContextTokens]);
+}
diff --git a/refact-agent/gui/src/components/UsageCounter/useUsageCounter.ts b/refact-agent/gui/src/components/UsageCounter/useUsageCounter.ts
index 17ec84695..f3ad55b5d 100644
--- a/refact-agent/gui/src/components/UsageCounter/useUsageCounter.ts
+++ b/refact-agent/gui/src/components/UsageCounter/useUsageCounter.ts
@@ -1,10 +1,9 @@
 import { useMemo } from "react";
 import {
-  selectIsStreaming,
-  selectIsWaiting,
   selectMessages,
+  selectEffectiveMaxContextTokens,
 } from "../../features/Chat";
-import { useAppSelector, useLastSentCompressionStop } from "../../hooks";
+import { useAppSelector } from "../../hooks";
 import {
   calculateUsageInputTokens,
   mergeUsages,
@@ -12,18 +11,39 @@
 import { isAssistantMessage } from "../../services/refact";
 
 export function useUsageCounter() {
-  const isStreaming =
useAppSelector(selectIsStreaming); - const isWaiting = useAppSelector(selectIsWaiting); - const compressionStop = useLastSentCompressionStop(); const messages = useAppSelector(selectMessages); - const assistantMessages = messages.filter(isAssistantMessage); - const usages = assistantMessages.map((msg) => msg.usage); - const currentThreadUsage = mergeUsages(usages); - const lastAssistantMessage = - assistantMessages.length > 0 - ? assistantMessages[assistantMessages.length - 1] - : undefined; - const lastUsage = lastAssistantMessage?.usage; + const maxContextTokens = useAppSelector(selectEffectiveMaxContextTokens); + + const { assistantMessages, currentThreadUsage, lastAssistantMessage } = + useMemo(() => { + const assistants = messages.filter(isAssistantMessage); + const mergedUsage = mergeUsages(assistants.map((msg) => msg.usage)); + const lastAssistant = + assistants.length > 0 ? assistants[assistants.length - 1] : undefined; + + return { + assistantMessages: assistants, + currentThreadUsage: mergedUsage, + lastAssistantMessage: lastAssistant, + }; + }, [messages]); + + // Check if the last message has server-executed tools (like web_search) + // These can cause temporary inflated token counts during streaming. + // We check both server_executed_tools (set after streaming) and tool_calls + // with srvtoolu_ prefix (visible during streaming) + const hasServerExecutedTools = useMemo(() => { + if (!lastAssistantMessage) return false; + const serverTools = lastAssistantMessage.server_executed_tools; + if (Array.isArray(serverTools) && serverTools.length > 0) { + return true; + } + const toolCalls = lastAssistantMessage.tool_calls; + if (Array.isArray(toolCalls)) { + return toolCalls.some((tc) => tc.id?.startsWith("srvtoolu_")); + } + return false; + }, [lastAssistantMessage]); const totalInputTokens = useMemo(() => { return calculateUsageInputTokens({ @@ -36,26 +56,61 @@ export function useUsageCounter() { }); }, [currentThreadUsage]); + // Deterministic fallback: scan backwards through assistant messages for first message with input tokens > 0 + // Include cache tokens for accurate context size (prompt_tokens + cache_creation + cache_read) const currentSessionTokens = useMemo(() => { - return lastUsage?.prompt_tokens ?? 0; - }, [lastUsage]); + for (let i = assistantMessages.length - 1; i >= 0; i--) { + const usage = assistantMessages[i]?.usage; + if (!usage) continue; + const promptTokens = usage.prompt_tokens; + const cacheCreation = usage.cache_creation_input_tokens ?? 0; + const cacheRead = usage.cache_read_input_tokens ?? 0; + const total = promptTokens + cacheCreation + cacheRead; + if (total > 0) return total; + } + return 0; + }, [assistantMessages]); - const isOverflown = useMemo(() => { - if (compressionStop.strength === "low") return true; - if (compressionStop.strength === "medium") return true; - if (compressionStop.strength === "high") return true; - return false; - }, [compressionStop.strength]); + const isContextFromPreviousMessage = useMemo(() => { + if (assistantMessages.length === 0) return false; + const lastMsg = assistantMessages[assistantMessages.length - 1]; + const usage = lastMsg.usage; + const lastTotal = + (usage?.prompt_tokens ?? 0) + + (usage?.cache_creation_input_tokens ?? 0) + + (usage?.cache_read_input_tokens ?? 
0); + return lastTotal === 0 && currentSessionTokens > 0; + }, [assistantMessages, currentSessionTokens]); + const tokenPercentage = useMemo(() => { + if (!maxContextTokens || maxContextTokens === 0) return 0; + return (currentSessionTokens / maxContextTokens) * 100; + }, [currentSessionTokens, maxContextTokens]); + + // Don't show warnings when server-executed tools are present + // Claude's web_search can report inflated token counts during streaming + // that normalize after completion - this prevents false warnings const isWarning = useMemo(() => { - if (compressionStop.strength === "medium") return true; - if (compressionStop.strength === "high") return true; - return false; - }, [compressionStop.strength]); + if (hasServerExecutedTools) return false; + return tokenPercentage >= 85; + }, [tokenPercentage, hasServerExecutedTools]); + + const isOverflown = useMemo(() => { + if (hasServerExecutedTools) return false; + return tokenPercentage >= 97; + }, [tokenPercentage, hasServerExecutedTools]); const shouldShow = useMemo(() => { - return messages.length > 0 && !isStreaming && !isWaiting; - }, [messages.length, isStreaming, isWaiting]); + return messages.length > 0; + }, [messages.length]); + + // Don't mark context as full when server-executed tools are present + // Claude's web_search can report inflated token counts during streaming + // that normalize after completion - this prevents false blocking + const isContextFull = useMemo(() => { + if (hasServerExecutedTools) return false; + return tokenPercentage >= 97; + }, [tokenPercentage, hasServerExecutedTools]); return { shouldShow, @@ -64,6 +119,9 @@ export function useUsageCounter() { currentSessionTokens, isOverflown, isWarning, - compressionStrength: compressionStop.strength, + isContextFull, + tokenPercentage, + hasServerExecutedTools, + isContextFromPreviousMessage, }; } diff --git a/refact-agent/gui/src/components/shared/iconButton.module.css b/refact-agent/gui/src/components/shared/iconButton.module.css new file mode 100644 index 000000000..638135ae6 --- /dev/null +++ b/refact-agent/gui/src/components/shared/iconButton.module.css @@ -0,0 +1,66 @@ +/* Shared icon button styles - use via composes: iconButton from '../shared/iconButton.module.css' */ + +.iconButton { + display: flex; + align-items: center; + justify-content: center; + width: 24px; + height: 24px; + padding: 0; + border: none; + background: transparent; + color: var(--gray-11); + cursor: pointer; + transition: filter 0.15s ease; + flex-shrink: 0; +} + +.iconButton svg { + width: 15px; + height: 15px; +} + +.iconButton:hover:not(:disabled) { + filter: brightness(1.5); +} + +.iconButton:disabled { + opacity: 0.5; + cursor: not-allowed; +} + +/* Variant: active state */ +.active { + composes: iconButton; + color: var(--blue-11); +} + +/* Variant: danger state */ +.danger { + composes: iconButton; + color: var(--red-11); +} + +/* Variant: stop action */ +.stop { + composes: iconButton; + color: var(--red-11); +} + +/* Variant: send action */ +.send { + composes: iconButton; + color: var(--accent-11); +} + +/* Variant: queue action */ +.queue { + composes: iconButton; + color: var(--green-11); +} + +/* Variant: priority action */ +.priority { + composes: iconButton; + color: var(--blue-11); +} diff --git a/refact-agent/gui/src/components/shared/scrollbar.module.css b/refact-agent/gui/src/components/shared/scrollbar.module.css new file mode 100644 index 000000000..db47aadb8 --- /dev/null +++ b/refact-agent/gui/src/components/shared/scrollbar.module.css @@ -0,0 +1,59 @@ 
+/* Shared scrollbar styles - use via composes: scrollbarThin from '../shared/scrollbar.module.css' */
+
+.scrollbarThin {
+  /* Reserve gutter to avoid layout shift; keep thumb hidden until hover. */
+  scrollbar-gutter: stable;
+  scrollbar-width: thin;
+  scrollbar-color: transparent transparent;
+}
+
+.scrollbarThin::-webkit-scrollbar {
+  width: 5px;
+  height: 5px;
+}
+
+.scrollbarThin::-webkit-scrollbar-track {
+  background: transparent;
+}
+
+.scrollbarThin::-webkit-scrollbar-thumb {
+  background-color: transparent;
+  border-radius: 2px;
+  transition: background-color 150ms ease-out;
+  opacity: 0;
+}
+
+.scrollbarThin:hover {
+  scrollbar-color: var(--gray-a5) transparent;
+}
+
+.scrollbarThin:hover::-webkit-scrollbar-thumb {
+  background-color: var(--gray-a5);
+  opacity: 1;
+}
+
+.scrollbarThin::-webkit-scrollbar-thumb:hover {
+  background-color: var(--gray-a6);
+}
+
+.scrollbarThin::-webkit-scrollbar-thumb:active {
+  background-color: var(--gray-a7);
+}
+
+/* Hide thumb by default; reveal on hover (for compact panels). */
+.scrollbarHoverOnly {
+  /* Back-compat alias: scrollbarThin already hover-only now. */
+  scrollbar-color: transparent transparent;
+}
+
+.scrollbarHoverOnly::-webkit-scrollbar-thumb {
+  background-color: transparent;
+}
+
+.scrollbarHoverOnly:hover {
+  scrollbar-color: var(--gray-a3) transparent;
+}
+
+.scrollbarHoverOnly:hover::-webkit-scrollbar-thumb {
+  background-color: var(--gray-a3);
+}
diff --git a/refact-agent/gui/src/components/shared/tokens.css b/refact-agent/gui/src/components/shared/tokens.css
new file mode 100644
index 000000000..1be88cfda
--- /dev/null
+++ b/refact-agent/gui/src/components/shared/tokens.css
@@ -0,0 +1,24 @@
+/* Shared design tokens - imported globally in Theme.tsx */
+
+/* Z-index scale */
+:root {
+  --z-base: 0;
+  --z-dropdown: 100;
+  --z-popover: 200;
+  --z-tooltip: 300;
+  --z-fixed: 400;
+  --z-modal: 500;
+  --z-overlay: 600;
+}
+
+/* Motion/animation timing */
+:root {
+  --motion-fast: 150ms;
+  --motion-medium: 200ms;
+  --motion-slow: 300ms;
+}
+
+/* Disabled state */
+:root {
+  --disabled-opacity: 0.5;
+}
diff --git a/refact-agent/gui/src/components/shared/useDelayedUnmount.ts b/refact-agent/gui/src/components/shared/useDelayedUnmount.ts
new file mode 100644
index 000000000..729ee2fa7
--- /dev/null
+++ b/refact-agent/gui/src/components/shared/useDelayedUnmount.ts
@@ -0,0 +1,50 @@
+import { useState, useEffect, useLayoutEffect } from "react";
+
+/**
+ * Hook that handles mount/unmount with animations.
+ * Returns { shouldRender, isAnimatingOpen } where:
+ * - shouldRender: true while content should be in DOM (including during animations)
+ * - isAnimatingOpen: true when the open animation should be applied (delayed by 1 frame on mount)
+ *
+ * @param isOpen - Whether the content should be visible
+ * @param delayMs - How long to wait before unmounting (should match animation duration)
+ * @param animate - Whether to animate the transition (when false, state changes are instant)
+ */
+export function useDelayedUnmount(
+  isOpen: boolean,
+  delayMs = 200,
+  animate = true,
+): { shouldRender: boolean; isAnimatingOpen: boolean } {
+  const [shouldRender, setShouldRender] = useState(isOpen);
+  const [isAnimatingOpen, setIsAnimatingOpen] = useState(isOpen);
+
+  useEffect(() => {
+    if (isOpen) {
+      setShouldRender(true);
+      if (!animate) {
+        setIsAnimatingOpen(true);
+      }
+    } else {
+      setIsAnimatingOpen(false);
+      if (!animate) {
+        setShouldRender(false);
+        return;
+      }
+      const timer = setTimeout(() => {
+        setShouldRender(false);
+      }, delayMs);
+      return () => clearTimeout(timer);
+    }
+  }, [isOpen, delayMs, animate]);
+
+  useLayoutEffect(() => {
+    if (isOpen && shouldRender && animate) {
+      const raf = requestAnimationFrame(() => {
+        setIsAnimatingOpen(true);
+      });
+      return () => cancelAnimationFrame(raf);
+    }
+  }, [isOpen, shouldRender, animate]);
+
+  return { shouldRender, isAnimatingOpen };
+}
diff --git a/refact-agent/gui/src/contexts/InternalLinkContext.tsx b/refact-agent/gui/src/contexts/InternalLinkContext.tsx
new file mode 100644
index 000000000..8054d6797
--- /dev/null
+++ b/refact-agent/gui/src/contexts/InternalLinkContext.tsx
@@ -0,0 +1,31 @@
+import React, { createContext } from "react";
+
+export type InternalLinkHandler = (url: string) => boolean;
+
+export interface InternalLinkContextValue {
+  handleInternalLink: InternalLinkHandler;
+}
+
+export const InternalLinkContext =
+  createContext<InternalLinkContextValue | null>(null);
+
+interface InternalLinkProviderProps {
+  onInternalLink: InternalLinkHandler;
+  children: React.ReactNode;
+}
+
+export const InternalLinkProvider: React.FC<InternalLinkProviderProps> = ({
+  onInternalLink,
+  children,
+}) => {
+  const value = React.useMemo(
+    () => ({ handleInternalLink: onInternalLink }),
+    [onInternalLink],
+  );
+
+  return (
+    <InternalLinkContext.Provider value={value}>
+      {children}
+    </InternalLinkContext.Provider>
+  );
+};
diff --git a/refact-agent/gui/src/contexts/internalLinkUtils.ts b/refact-agent/gui/src/contexts/internalLinkUtils.ts
new file mode 100644
index 000000000..dce5ac0b2
--- /dev/null
+++ b/refact-agent/gui/src/contexts/internalLinkUtils.ts
@@ -0,0 +1,19 @@
+import { useContext } from "react";
+import { InternalLinkContext } from "./InternalLinkContext";
+
+export const useInternalLinkHandler = () => {
+  const context = useContext(InternalLinkContext);
+  return context;
+};
+
+export const parseRefactLink = (
+  url: string,
+): { type: string; id: string } | null => {
+  if (!url.startsWith("refact://")) return null;
+
+  const withoutProtocol = url.substring("refact://".length);
+  const [type, ...rest] = withoutProtocol.split("/");
+  const id = rest.join("/");
+
+  return { type, id };
+};
diff --git a/refact-agent/gui/src/events/index.ts b/refact-agent/gui/src/events/index.ts
index a8a100c6e..e670379d9 100644
--- a/refact-agent/gui/src/events/index.ts
+++ b/refact-agent/gui/src/events/index.ts
@@ -6,7 +6,10 @@ export {
   type Chat,
   type ToolUse,
 } from "../features/Chat/Thread/types";
-export { newChatAction } from
"../features/Chat/Thread/actions"; +export { + newChatAction, + newChatWithInitialMessages, +} from "../features/Chat/Thread/actions"; import { type Chat } from "../features/Chat/Thread/types"; import type { Snippet } from "../features/Chat/selectedSnippet"; import type { Config } from "../features/Config/configSlice"; @@ -15,7 +18,6 @@ import { request, ready, receive, error } from "../features/FIM/actions"; import type { HistoryState } from "../features/History/historySlice"; import type { TipOfTheDayState } from "../features/TipOfTheDay"; import type { PageSliceState } from "../features/Pages/pagesSlice"; -import type { TourState } from "../features/Tour"; import type { FIMDebugState } from "../hooks"; import { CurrentProjectInfo } from "../features/Chat/currentProject"; import { TeamsSliceState } from "../features/Teams"; @@ -49,7 +51,6 @@ export type { export type InitialState = { teams: TeamsSliceState; fim: FIMDebugState; - tour: TourState; tipOfTheDay: TipOfTheDayState; config: Config; active_file: FileInfo; @@ -81,6 +82,9 @@ export { ideSetLoginMessage, ideSetActiveTeamsGroup, ideClearActiveTeamsGroup, + ideTaskDone, + ideAskQuestions, + ideSwitchToThread, } from "../hooks/useEventBusForIDE"; export { ideAttachFileToChat } from "../hooks/useEventBusForApp"; diff --git a/refact-agent/gui/src/events/setup.ts b/refact-agent/gui/src/events/setup.ts index 2fa1b99f1..c5a333e78 100644 --- a/refact-agent/gui/src/events/setup.ts +++ b/refact-agent/gui/src/events/setup.ts @@ -10,23 +10,12 @@ export interface CloudHost { userName: string; } -export interface SelfHost { - type: "self"; - endpointAddress: string; -} - -export interface EnterpriseHost { - type: "enterprise"; - endpointAddress: string; - apiKey: string; -} - export interface ActionFromSetup { type: EVENT_NAMES_FROM_SETUP; payload?: Record<string, unknown>; } -export type HostSettings = CloudHost | SelfHost | EnterpriseHost; +export type HostSettings = CloudHost; export function isActionFromSetup(action: unknown): action is ActionFromSetup { if (!action) return false; diff --git a/refact-agent/gui/src/features/App.module.css b/refact-agent/gui/src/features/App.module.css index 8ba0c2855..334849a66 100644 --- a/refact-agent/gui/src/features/App.module.css +++ b/refact-agent/gui/src/features/App.module.css @@ -1,5 +1,6 @@ .rootFlex { height: 100vh; + overflow: hidden; } .integrationsPagePadding { diff --git a/refact-agent/gui/src/features/App.tsx b/refact-agent/gui/src/features/App.tsx index 74d6084ed..f194a88e9 100644 --- a/refact-agent/gui/src/features/App.tsx +++ b/refact-agent/gui/src/features/App.tsx @@ -8,24 +8,25 @@ import { useConfig, useEffectOnce, useEventsBusForIDE, + useSidebarSubscription, + useAllChatsSubscription, + useGetConfiguredProvidersQuery, } from "../hooks"; +import { useGetPing } from "../hooks/useGetPing"; +import { useBrowserOnlineStatus } from "../hooks/useBrowserOnlineStatus"; import { FIMDebug } from "./FIM"; -import { store, persistor, RootState } from "../app/store"; +import { store, persistor } from "../app/store"; import { Provider } from "react-redux"; import { PersistGate } from "redux-persist/integration/react"; import { Theme } from "../components/Theme"; import { useEventBusForWeb } from "../hooks/useEventBusForWeb"; import { Statistics } from "./Statistics"; -import { Welcome } from "../components/Tour"; import { push, popBackTo, pop, selectPages, } from "../features/Pages/pagesSlice"; -import { TourProvider } from "./Tour"; -import { Tour } from "../components/Tour"; -import { TourEnd } from 
"../components/Tour/TourEnd"; import { useEventBusForApp } from "../hooks/useEventBusForApp"; import { AbortControllerProvider } from "../contexts/AbortControllers"; import { Toolbar } from "../components/Toolbar"; @@ -37,12 +38,18 @@ import { Providers } from "./Providers"; import { UserSurvey } from "./UserSurvey"; import { integrationsApi } from "../services/refact"; import { LoginPage } from "./Login"; +import { TaskList, TaskWorkspace } from "./Tasks"; +import { KnowledgeWorkspace } from "./Knowledge"; +import { Customization } from "./Customization"; +import { DefaultModels } from "./DefaultModels"; +import { ChatLoading } from "../components/ChatContent/ChatLoading"; import styles from "./App.module.css"; import classNames from "classnames"; import { usePatchesAndDiffsEventsForIDE } from "../hooks/usePatchesAndDiffEventsForIDE"; import { UrqlProvider } from "../../urqlProvider"; import { selectActiveGroup } from "./Teams"; +import { hasAnyUsableActiveProvider } from "./Login/providerAccess"; export interface AppProps { style?: React.CSSProperties; @@ -63,34 +70,68 @@ export const InnerApp: React.FC<AppProps> = ({ style }: AppProps) => { const { chatPageChange, setIsChatStreaming, setIsChatReady } = useEventsBusForIDE(); - const tourState = useAppSelector((state: RootState) => state.tour); - const historyState = useAppSelector((state: RootState) => state.history); + const historyState = useAppSelector((state) => state.history); const maybeCurrentActiveGroup = useAppSelector(selectActiveGroup); const chatId = useAppSelector(selectChatId); + const providersQuery = useGetConfiguredProvidersQuery(); useEventBusForWeb(); useEventBusForApp(); usePatchesAndDiffsEventsForIDE(); + useSidebarSubscription(); + useAllChatsSubscription(); + useGetPing(); + useBrowserOnlineStatus(); const [isPaddingApplied, setIsPaddingApplied] = useState<boolean>(false); - const handlePaddingShift = (state: boolean) => { + const handlePaddingShift = useCallback((state: boolean) => { setIsPaddingApplied(state); - }; + }, []); const config = useConfig(); - const isLoggedIn = - isPageInHistory("history") || - isPageInHistory("welcome") || - isPageInHistory("chat"); + const desiredPage = pages[pages.length - 1]; + const [renderedPage, setRenderedPage] = useState(desiredPage); useEffect(() => { - if (config.apiKey && config.addressURL && !isLoggedIn) { - if (tourState.type === "in_progress" && tourState.step === 1) { - dispatch(push({ name: "welcome" })); - } else if ( - Object.keys(historyState).length === 0 && - // TODO: rework when better router will be implemented + if (desiredPage === renderedPage) return; + if ( + desiredPage.name === renderedPage.name && + desiredPage.name !== "task workspace" && + desiredPage.name !== "thread history page" + ) { + setRenderedPage(desiredPage); + return; + } + const rafId = requestAnimationFrame(() => { + setRenderedPage(desiredPage); + }); + return () => cancelAnimationFrame(rafId); + }, [desiredPage, renderedPage]); + + const pageSwitching = desiredPage !== renderedPage; + + const isLoggedIn = isPageInHistory("history") || isPageInHistory("chat"); + + const hasCloudSession = + (config.apiKey ?? "").trim().length > 0 && + (config.addressURL ?? "").trim().length > 0; + const hasAnyActiveProvider = useMemo(() => { + return hasAnyUsableActiveProvider({ + providers: providersQuery.data?.providers ?? 
[], + addressURL: config.addressURL, + apiKey: config.apiKey, + }); + }, [providersQuery.data?.providers, config.addressURL, config.apiKey]); + const canAccessApp = hasCloudSession || hasAnyActiveProvider; + const canResolveProviderAccess = + providersQuery.isSuccess || providersQuery.isError; + + useEffect(() => { + if (canAccessApp && !isLoggedIn) { + if ( + !historyState.isLoading && + Object.keys(historyState.chats).length === 0 && maybeCurrentActiveGroup ) { dispatch(push({ name: "history" })); @@ -100,15 +141,15 @@ export const InnerApp: React.FC<AppProps> = ({ style }: AppProps) => { dispatch(push({ name: "history" })); } } - if (!config.apiKey && !config.addressURL && isLoggedIn) { + + if (!canAccessApp && canResolveProviderAccess && isLoggedIn) { dispatch(popBackTo({ name: "login page" })); } }, [ - config.apiKey, - config.addressURL, + canAccessApp, + canResolveProviderAccess, isLoggedIn, dispatch, - tourState, historyState, maybeCurrentActiveGroup, ]); @@ -128,34 +169,40 @@ export const InnerApp: React.FC<AppProps> = ({ style }: AppProps) => { setIsChatReady(true); }); - const startTour = () => { - dispatch(push({ name: "history" })); - }; - - const goBack = () => { + const goBack = useCallback(() => { dispatch(pop()); - }; + }, [dispatch]); - const goBackFromIntegrations = () => { + const goBackFromIntegrations = useCallback(() => { dispatch(pop()); dispatch(integrationsApi.util.resetApiState()); - }; - - const page = pages[pages.length - 1]; + }, [dispatch]); const activeTab: Tab | undefined = useMemo(() => { - if (page.name === "chat") { + if (desiredPage.name === "chat") { return { type: "chat", id: chatId, }; } - if (page.name === "history") { + if (desiredPage.name === "history") { return { type: "dashboard", }; } - }, [page, chatId]); + if (desiredPage.name === "task workspace") { + return { + type: "task", + taskId: desiredPage.taskId, + taskName: "", + }; + } + if (desiredPage.name === "knowledge graph") { + return { + type: "dashboard", + }; + } + }, [desiredPage, chatId]); return ( <Flex @@ -164,21 +211,21 @@ export const InnerApp: React.FC<AppProps> = ({ style }: AppProps) => { style={style} className={classNames(styles.rootFlex, { [styles.integrationsPagePadding]: - page.name === "integrations page" && isPaddingApplied, + renderedPage.name === "integrations page" && isPaddingApplied, })} > + {activeTab && <Toolbar activeTab={activeTab} />} <PageWrapper host={config.host} style={{ - paddingRight: page.name === "integrations page" ? 0 : undefined, + paddingRight: + renderedPage.name === "integrations page" ? 
0 : undefined, }} > <UserSurvey /> - {page.name === "login page" && <LoginPage />} - {activeTab && <Toolbar activeTab={activeTab} />} - {page.name === "welcome" && <Welcome onPressNext={startTour} />} - {page.name === "tour end" && <TourEnd />} - {page.name === "history" && ( + {renderedPage.name === "login page" && <LoginPage />} + {pageSwitching && <ChatLoading />} + {!pageSwitching && renderedPage.name === "history" && ( <Sidebar takingNotes={false} onOpenChatInTab={undefined} @@ -188,17 +235,18 @@ export const InnerApp: React.FC<AppProps> = ({ style }: AppProps) => { }} /> )} - {page.name === "chat" && ( + {!pageSwitching && renderedPage.name === "chat" && ( <Chat host={config.host} tabbed={config.tabbed} backFromChat={goBack} /> )} - {page.name === "fill in the middle debug page" && ( - <FIMDebug host={config.host} tabbed={config.tabbed} /> - )} - {page.name === "statistics page" && ( + {!pageSwitching && + renderedPage.name === "fill in the middle debug page" && ( + <FIMDebug host={config.host} tabbed={config.tabbed} /> + )} + {!pageSwitching && renderedPage.name === "statistics page" && ( <Statistics backFromStatistic={goBack} tabbed={config.tabbed} @@ -206,7 +254,7 @@ export const InnerApp: React.FC<AppProps> = ({ style }: AppProps) => { onCloseStatistic={goBack} /> )} - {page.name === "integrations page" && ( + {!pageSwitching && renderedPage.name === "integrations page" && ( <Integrations backFromIntegrations={goBackFromIntegrations} tabbed={config.tabbed} @@ -215,24 +263,46 @@ export const InnerApp: React.FC<AppProps> = ({ style }: AppProps) => { handlePaddingShift={handlePaddingShift} /> )} - {page.name === "providers page" && ( + {!pageSwitching && renderedPage.name === "providers page" && ( <Providers backFromProviders={goBack} tabbed={config.tabbed} host={config.host} /> )} - {page.name === "thread history page" && ( + {!pageSwitching && renderedPage.name === "thread history page" && ( <ThreadHistory backFromThreadHistory={goBack} tabbed={config.tabbed} host={config.host} onCloseThreadHistory={goBack} - chatId={page.chatId} + chatId={renderedPage.chatId} + /> + )} + {!pageSwitching && renderedPage.name === "tasks list" && <TaskList />} + {!pageSwitching && renderedPage.name === "task workspace" && ( + <TaskWorkspace taskId={renderedPage.taskId} /> + )} + {!pageSwitching && renderedPage.name === "knowledge graph" && ( + <KnowledgeWorkspace /> + )} + {!pageSwitching && renderedPage.name === "customization" && ( + <Customization + backFromCustomization={goBack} + tabbed={config.tabbed} + host={config.host} + initialKind={renderedPage.kind} + initialConfigId={renderedPage.configId} + /> + )} + {!pageSwitching && renderedPage.name === "default models" && ( + <DefaultModels + backFromDefaultModels={goBack} + tabbed={config.tabbed} + host={config.host} /> )} </PageWrapper> - {page.name !== "welcome" && <Tour page={pages[pages.length - 1].name} />} </Flex> ); }; @@ -244,11 +314,9 @@ export const App = () => { <UrqlProvider> <PersistGate persistor={persistor}> <Theme> - <TourProvider> - <AbortControllerProvider> - <InnerApp /> - </AbortControllerProvider> - </TourProvider> + <AbortControllerProvider> + <InnerApp /> + </AbortControllerProvider> </Theme> </PersistGate> </UrqlProvider> diff --git a/refact-agent/gui/src/features/AttachedImages/imagesSlice.ts b/refact-agent/gui/src/features/AttachedImages/imagesSlice.ts deleted file mode 100644 index 7b432f9dc..000000000 --- a/refact-agent/gui/src/features/AttachedImages/imagesSlice.ts +++ /dev/null @@ -1,40 +0,0 @@ -import { createSlice, 
type PayloadAction } from "@reduxjs/toolkit"; - -export type ImageFile = { - name: string; - content: string | ArrayBuffer | null; - type: string; -}; - -const initialState: { - images: ImageFile[]; -} = { - images: [], -}; - -export const attachedImagesSlice = createSlice({ - name: "attachedImages", - initialState: initialState, - reducers: { - addImage: (state, action: PayloadAction<ImageFile>) => { - if (state.images.length < 10) { - state.images = state.images.concat(action.payload); - } - }, - removeImageByIndex: (state, action: PayloadAction<number>) => { - state.images = state.images.filter( - (_image, index) => index !== action.payload, - ); - }, - resetAttachedImagesSlice: () => { - return initialState; - }, - }, - selectors: { - selectAllImages: (state) => state.images, - }, -}); - -export const { selectAllImages } = attachedImagesSlice.selectors; -export const { addImage, removeImageByIndex, resetAttachedImagesSlice } = - attachedImagesSlice.actions; diff --git a/refact-agent/gui/src/features/AttachedImages/index.ts b/refact-agent/gui/src/features/AttachedImages/index.ts deleted file mode 100644 index 338444c6b..000000000 --- a/refact-agent/gui/src/features/AttachedImages/index.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./imagesSlice"; diff --git a/refact-agent/gui/src/features/Chat/Chat.test.tsx b/refact-agent/gui/src/features/Chat/Chat.test.tsx deleted file mode 100644 index 01aae5a1a..000000000 --- a/refact-agent/gui/src/features/Chat/Chat.test.tsx +++ /dev/null @@ -1,819 +0,0 @@ -import { - expect, - vi, - describe, - it, - afterEach, - beforeEach, - test, - beforeAll, - afterAll, -} from "vitest"; -import { - render, - waitFor, - stubResizeObserver, - within, - // setUpSystemPromptsForChat, - cleanup, - screen, -} from "../../utils/test-utils"; -import { Chat } from "./Chat"; -// import { -// EVENT_NAMES_TO_CHAT, -// EVENT_NAMES_FROM_CHAT, -// RestoreChat, -// CreateNewChatThread, -// ChatErrorStreaming, -// ChatReceiveCapsError, -// ResponseToChat, -// ToolCall, -// ToolResult, -// } from "../events"; -import { STUB_CAPS_RESPONSE } from "../../__fixtures__"; -// import { useEventBusForChat } from "../hooks"; - -import { http, HttpResponse } from "msw"; - -import { - server, - goodCaps, - goodPrompts, - noTools, - noCommandPreview, - noCompletions, - goodUser, - goodPing, - chatLinks, - telemetryChat, - telemetryNetwork, -} from "../../utils/mockServer"; - -const handlers = [ - goodCaps, - goodPrompts, - noTools, - noCommandPreview, - noCompletions, - goodUser, - goodPing, - chatLinks, - telemetryChat, - telemetryNetwork, -]; - -// const handlers = [ -// http.get("http://127.0.0.1:8001/v1/caps", () => { -// return HttpResponse.json(STUB_CAPS_RESPONSE); -// }), -// http.get("http://127.0.0.1:8001/v1/tools", () => { -// return HttpResponse.json([]); -// }), -// http.get("http://127.0.0.1:8001/v1/customization", () => { -// return HttpResponse.json({ system_prompts: SYSTEM_PROMPTS }); -// }), -// http.post("http://127.0.0.1:8001/v1/at-command-completion", () => { -// return HttpResponse.json({ -// completions: [], -// replace: [0, 0], -// is_cmd_executable: false, -// }); -// }), - -// http.post("http://127.0.0.1:8001/v1/at-command-preview", () => { -// return HttpResponse.json({ -// messages: [], -// }); -// }), -// ]; - -// const worker = setupServer(...handlers); - -const App: React.FC = () => { - return <Chat host="web" tabbed={false} backFromChat={() => ({})} />; -}; - -// MAybe render the chat once and use the new chat button a lot ? 
-afterEach(() => { - // server.resetHandlers(); - cleanup(); - // vi.restoreAllMocks(); -}); - -describe("Chat", () => { - beforeAll(() => { - // worker.listen(); - stubResizeObserver(); - }); - - afterAll(() => { - // worker.close(); - }); - - beforeEach(() => { - // worker.resetHandlers(); - // stubResizeObserver(); - // vi.spyOn(window, "postMessage").mockImplementation(postMessage); - }); - - // afterEach(() => { - // // server.resetHandlers(); - // cleanup(); - // // vi.restoreAllMocks(); - // }); - - it("should send request to the lsp", async () => { - const encoder = new TextEncoder(); - server.use(...handlers); - server.use( - http.post( - "http://127.0.0.1:8001/v1/chat", - () => { - const stream = new ReadableStream({ - start(controller) { - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - content: "hello\n", - role: "user", - tool_call_id: "", - usage: null, - })}\n\n`, - ), - ); - - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - choices: [ - { - delta: { - content: "hello", - function_call: null, - role: "assistant", - tool_calls: null, - }, - finish_reason: null, - index: 0, - logprobs: null, - }, - ], - })}\n\n`, - ), - ); - - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - choices: [ - { - delta: { - content: " there", - function_call: null, - role: null, - tool_calls: null, - }, - finish_reason: null, - index: 0, - logprobs: null, - }, - ], - })}\n\n`, - ), - ); - - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - choices: [ - { - delta: { - content: null, - function_call: null, - role: null, - tool_calls: null, - }, - finish_reason: "stop", - index: 0, - logprobs: null, - }, - ], - })}\n\n`, - ), - ); - - controller.enqueue( - encoder.encode(`data: ${JSON.stringify(["DONE"])}\n\n`), - ); - - controller.close(); - }, - }); - - return new HttpResponse(stream, { - headers: { - "Content-Type": "application/json", - "Transfer-Encoding": "chunked", - }, - }); - }, - // { once: true }, // TODO: title - ), - ); - - const { user, ...app } = render( - <Chat host="vscode" tabbed={false} backFromChat={() => ({})} />, - { - preloadedState: { - pages: [{ name: "chat" }], - }, - }, - ); - - const textarea = screen.getByTestId("chat-form-textarea"); - - expect(textarea).not.toBeNull(); - - const quickButtons = app.getAllByText(/quick/i); - - await user.click(quickButtons[0]); - - await user.type(textarea, "hello"); - - await waitFor(() => app.queryByText(STUB_CAPS_RESPONSE.chat_default_model)); - - await user.keyboard("{Enter}"); - - await waitFor(() => { - expect(screen.getAllByText("hello there")).not.toBeNull(); - }); - }); - - // TODO: when no caps it should not send - - // TODO: skip until history is added - it.skip("when creating a new chat I can select which model to use", async () => { - // Missing props in jsdom - // window.PointerEvent = class PointerEvent extends Event {}; - server.use( - goodPrompts, - noCommandPreview, - noCompletions, - noTools, - goodCaps, - goodPing, - ); - const chatSpy = vi.fn(); - server.use( - http.post("http://127.0.0.1:8001/v1/chat", (req) => { - chatSpy(req); - return HttpResponse.json({}); - }), - ); - - const { user, ...app } = render(<App />); - - // const userInput = await app.findByText("hello"); - // expect(userInput.textContent).toContain("hello"); - - // expect(app.queryByTitle("chat model")).toBeNull(); - - // await waitFor(() => expect(app.queryByTitle("chat model")).not.toBeNull(), { - // timeout: 1000, - // }); - await waitFor(() => - expect( - 
app.queryByText(STUB_CAPS_RESPONSE.chat_default_model), - ).not.toBeNull(), - ); - - await user.click(app.getByTitle("chat model")); - - app.debug(app.container, 100000); - - await user.click(app.getByRole("option", { name: /test-model/i })); - - await waitFor(() => expect(app.queryByText("test-model")).not.toBeNull()); - - const textarea: HTMLTextAreaElement | null = - app.container.querySelector("textarea"); - - expect(textarea).not.toBeNull(); - if (textarea) { - await user.type(textarea, "hello"); - await user.type(textarea, "{enter}"); - } - - expect(chatSpy).toHaveBeenCalled(); - }); - - // TODO: skip until chat can initiated with messages - // it.skip("retry chat", async () => { - // vi.mock("uuid", () => ({ v4: () => "foo" })); - // const postMessageSpy = vi.spyOn(window, "postMessage"); - - // let id = ""; - // const { user, ...app } = render( - // <App - // setId={(v) => { - // id = v; - // }} - // />, - // ); - - // const restoreChatAction: RestoreChat = { - // type: EVENT_NAMES_TO_CHAT.RESTORE_CHAT, - // payload: { - // id: id, - // chat: { - // id: "bar", - // messages: [ - // ["user", "hello 👋"], - // ["assistant", "hello there"], - // ["user", "how are you?"], - // ["assistant", "fine"], - // ], - // title: "hello", - // model: "gpt-3.5-turbo", - // }, - // }, - // }; - - // postMessage(restoreChatAction); - - // await waitFor(() => expect(app.queryByText("hello 👋")).not.toBeNull()); - - // const retryButton = app.getByText(/hello 👋/); - - // await user.click(retryButton); - - // const textarea: HTMLTextAreaElement | null = - // app.container.querySelector("textarea"); - - // expect(textarea).not.toBeNull(); - // if (textarea) { - // textarea.setSelectionRange(0, textarea.value.length); - // await user.type(textarea, "{Enter}"); - // } - - // expect(postMessageSpy).toHaveBeenLastCalledWith( - // { - // type: EVENT_NAMES_FROM_CHAT.ASK_QUESTION, - // payload: { - // id: "bar", - // messages: [["user", "hello 👋"]], - // title: "hello", - // model: "gpt-3.5-turbo", - // attach_file: false, - // tools: null, - // }, - // }, - // "*", - // ); - // }); - - it("chat error streaming", async () => { - const encoder = new TextEncoder(); - server.use( - goodPing, - goodPrompts, - noCommandPreview, - goodCaps, - noCommandPreview, - noCompletions, - noTools, - chatLinks, - telemetryChat, - telemetryNetwork, - ); - server.use( - http.post( - "http://127.0.0.1:8001/v1/chat", - () => { - const stream = new ReadableStream({ - start(controller) { - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - detail: "whoops", - })}\n\n`, - ), - ); - }, - }); - return new HttpResponse(stream, { - headers: { - "Content-Type": "application/json", - "Transfer-Encoding": "chunked", - }, - }); - }, - // { once: true }, TODO: title - ), - ); - const { user, ...app } = render(<App />); - - const textarea = app.getByTestId("chat-form-textarea"); - - expect(textarea).not.toBeNull(); - - const quickButtons = app.getAllByText(/quick/i); - - await user.click(quickButtons[0]); - - await user.type(textarea, "hello"); - - await user.keyboard("{Enter}"); - - await waitFor(() => expect(app.queryByText(/whoops/)).not.toBeNull()); - }); - - test.skip("chat with different system prompt", async () => { - // Missing props in jsdom - // window.PointerEvent = class PointerEvent extends Event {}; - window.HTMLElement.prototype.scrollIntoView = vi.fn(); - window.HTMLElement.prototype.hasPointerCapture = vi.fn(); - window.HTMLElement.prototype.releasePointerCapture = vi.fn(); - - // const postMessageSpy = 
vi.spyOn(window, "postMessage"); - // const windowSpy = vi.fn(); - // window.addEventListener("message", windowSpy); - - const { user, ...app } = render(<App />); - - // setUpSystemPromptsForChat(id); - - const btn = await waitFor(() => app.getByTitle("default"), { - timeout: 1000, - }); - - await user.click(btn); - - await user.click(app.getByText(/insert_jokes/i)); - - const textarea = app.getByTestId("chat-form-textarea"); - - expect(textarea).not.toBeNull(); - - await user.type(textarea, "hello"); - - await user.keyboard("{Enter}"); - - // expect(postMessageSpy).toHaveBeenCalledWith( - // { - // type: EVENT_NAMES_FROM_CHAT.ASK_QUESTION, - // payload: { - // id, - // title: "", - // model: "", - // attach_file: false, - // tools: null, - // messages: [ - // ["system", SYSTEM_PROMPTS.insert_jokes.text], - // ["user", "hello\n"], - // ], - // }, - // }, - // "*", - // ); - }); - - // test("restore and receive response with use question", async () => { - // vi.mock("uuid", () => ({ v4: () => "foo" })); - // let id = ""; - // const app = render( - // <App - // setId={(v) => { - // id = v; - // }} - // />, - // ); - - // const restoreChatAction: RestoreChat = { - // type: EVENT_NAMES_TO_CHAT.RESTORE_CHAT, - // payload: { - // id, - // chat: { - // id: "bar", - // messages: [ - // ["user", "/shorter"], - // ["assistant", "hello there"], - // ["user", "even shorter still"], - // ], - // title: "hello", - // model: "gpt-3.5-turbo", - // }, - // }, - // }; - - // postMessage(restoreChatAction); - - // await waitFor(() => expect(app.queryByText("hello there")).not.toBeNull()); - - // const file: ResponseToChat = { - // type: EVENT_NAMES_TO_CHAT.CHAT_RESPONSE, - // payload: { - // id: "bar", - // content: - // '[{"file_name":"/refact-chat-js/src/services/refact.ts","file_content":"hello","line1":121,"line2":451,"usefulness":100.0}]', - // role: "context_file", - // }, - // }; - - // postMessage(file); - - // const assistant: ResponseToChat = { - // type: EVENT_NAMES_TO_CHAT.CHAT_RESPONSE, - // payload: { - // id: "bar", - // role: "user", - // content: "even shorter still", - // }, - // }; - - // postMessage(assistant); - - // postMessage({ - // type: EVENT_NAMES_TO_CHAT.DONE_STREAMING, - // payload: { id: "bar" }, - // }); - - // await new Promise((r) => setTimeout(r, 500)); - - // const messages = app.getAllByText("even shorter still"); - // expect(messages.length).toBe(1); - - // expect(() => app.queryByText("hello there")).not.toBeNull(); - // }); - - // test("Chat with functions", async () => { - // const postMessageSpy = vi.spyOn(window, "postMessage"); - - // window.HTMLElement.prototype.scrollIntoView = vi.fn(); - // window.HTMLElement.prototype.hasPointerCapture = vi.fn(); - // window.HTMLElement.prototype.releasePointerCapture = vi.fn(); - - // let id = ""; - // const { user, ...app } = render( - // <App - // setId={(v) => { - // id = v; - // }} - // />, - // ); - - // const toolCalls: ToolCall[] = [ - // { - // id, - // function: { - // name: "cat", - // arguments: JSON.stringify({ file: "meow.txt" }), - // }, - // type: "function", - // index: 0, - // }, - // ]; - - // const toolResult: ToolResult = { - // tool_call_id: "a", - // finish_reason: "call_worked", - // content: "meow\nmeow\n🐈\n", - // }; - - // const restoreChatAction: RestoreChat = { - // type: EVENT_NAMES_TO_CHAT.RESTORE_CHAT, - // payload: { - // id, - // chat: { - // id: "bar", - // messages: [ - // ["user", "hello"], - // ["assistant", "hello there", toolCalls], - // ["tool", toolResult], - // ], - // title: "hello", - 
// model: "gpt-3.5-turbo", - // }, - // }, - // }; - - // postMessage(restoreChatAction); - - // const textarea = app.getByTestId("chat-form-textarea"); - - // expect(textarea).not.toBeNull(); - - // await user.type(textarea, "hello"); - - // await user.keyboard("{Enter}"); - - // expect(postMessageSpy).toHaveBeenCalledWith( - // { - // type: EVENT_NAMES_FROM_CHAT.ASK_QUESTION, - // payload: { - // id: "bar", - // title: "hello", - // model: "gpt-3.5-turbo", - // attach_file: false, - // tools: null, - // messages: [ - // ["user", "hello"], - // ["assistant", "hello there", toolCalls], - // ["tool", toolResult], - // ["user", "hello\n"], - // ], - // }, - // }, - // "*", - // ); - // }); - - // test("Prevent send when restored with uncalled tool_calls", async () => { - // let id = ""; - // const app = render( - // <App - // setId={(v) => { - // id = v; - // }} - // />, - // ); - - // const restoreChatAction: RestoreChat = { - // type: EVENT_NAMES_TO_CHAT.RESTORE_CHAT, - // payload: { - // id, - // chat: { - // id: "bar", - // messages: [ - // ["user", "hello 👋"], - // [ - // "assistant", - // "calling tools", - // [ - // { - // function: { - // arguments: '{"file": "foo.txt"}', - // name: "cat", - // }, - // index: 0, - // type: "function", - // id: "test", - // }, - // ], - // ], - // ], - // title: "hello", - // model: "gpt-3.5-turbo", - // }, - // }, - // }; - - // postMessage(restoreChatAction); - - // await waitFor(() => expect(app.queryByText("hello 👋")).not.toBeNull()); - - // const button = app.queryByText(/resume/i); - - // expect(button).not.toBeNull(); - // }); -}); - -describe("attached file", () => { - test("given a file has been attached to a message, it should un-attach the file after sending", async () => { - const encoder = new TextEncoder(); - server.use(...handlers); - server.use( - http.post("http://127.0.0.1:8001/v1/chat", () => { - const stream = new ReadableStream({ - start(controller) { - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - content: "hello\n", - role: "user", - tool_call_id: "", - usage: null, - })}\n\n`, - ), - ); - - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - choices: [ - { - delta: { - content: "hello", - function_call: null, - role: "assistant", - tool_calls: null, - }, - finish_reason: null, - index: 0, - logprobs: null, - }, - ], - })}\n\n`, - ), - ); - - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - choices: [ - { - delta: { - content: " there", - function_call: null, - role: null, - tool_calls: null, - }, - finish_reason: null, - index: 0, - logprobs: null, - }, - ], - })}\n\n`, - ), - ); - - controller.enqueue( - encoder.encode( - `data: ${JSON.stringify({ - choices: [ - { - delta: { - content: null, - function_call: null, - role: null, - tool_calls: null, - }, - finish_reason: "stop", - index: 0, - logprobs: null, - }, - ], - })}\n\n`, - ), - ); - - controller.enqueue( - encoder.encode(`data: ${JSON.stringify(["DONE"])}\n\n`), - ); - - controller.close(); - }, - }); - - return new HttpResponse(stream, { - headers: { - "Content-Type": "application/json", - "Transfer-Encoding": "chunked", - }, - }); - }), - ); - const { user, ...app } = render(<App />, { - preloadedState: { - config: { - host: "ide", - lspPort: 8001, - themeProps: {}, - }, - active_file: { - name: "test_file.md", - line1: null, - line2: null, - // attach: false, - can_paste: false, - path: "path/test_file.md", - cursor: null, - }, - selected_snippet: { - language: "md", - code: "### Hello", - path: 
"path/test_file.md", - basename: "test_file.md", - }, - }, - }); - - const fileList = app.getByTestId("attached_file_list"); - expect(fileList).not.toBeNull(); - - const fileButton = within(fileList).queryByRole("button", { - name: /test_file\.md/i, - }); - - expect(fileButton).not.toBeNull(); - - const textarea = app.getByTestId("chat-form-textarea"); - expect(textarea).not.toBeNull(); - await user.type(textarea, "👋"); - await user.keyboard("{Enter}"); - - await waitFor(() => - expect(app.queryByTestId("attached_file_list")).toBeNull(), - ); - }); -}); diff --git a/refact-agent/gui/src/features/Chat/Thread/actions.ts b/refact-agent/gui/src/features/Chat/Thread/actions.ts index 7495754dd..9ca2b74fa 100644 --- a/refact-agent/gui/src/features/Chat/Thread/actions.ts +++ b/refact-agent/gui/src/features/Chat/Thread/actions.ts @@ -4,66 +4,197 @@ import { type ChatThread, type PayloadWithId, type ToolUse, + type ImageFile, + type TextFile, IntegrationMeta, LspChatMode, PayloadWithChatAndMessageId, PayloadWithChatAndBoolean, PayloadWithChatAndNumber, } from "./types"; -import { - isAssistantDelta, - isAssistantMessage, - isCDInstructionMessage, - isChatResponseChoice, - isToolCallMessage, - isToolMessage, - isUserMessage, - ToolCall, - ToolMessage, - type ChatMessages, - type ChatResponse, -} from "../../../services/refact/types"; +import type { ToolConfirmationPauseReason } from "../../../services/refact"; +import { type ChatMessages } from "../../../services/refact/types"; import type { AppDispatch, RootState } from "../../../app/store"; import { type SystemPrompts } from "../../../services/refact/prompts"; -import { formatMessagesForLsp, consumeStream } from "./utils"; -import { generateChatTitle, sendChat } from "../../../services/refact/chat"; -// import { ToolCommand, toolsApi } from "../../../services/refact/tools"; -import { scanFoDuplicatesWith, takeFromEndWhile } from "../../../utils"; import { ChatHistoryItem } from "../../History/historySlice"; import { ideToolCallResponse } from "../../../hooks/useEventBusForIDE"; import { - capsApi, - DetailMessageWithErrorType, - isDetailMessage, + trajectoriesApi, + trajectoryDataToChatThread, + isUserMessage, } from "../../../services/refact"; +import { + sendUserMessage, + sendChatCommand, + type MessageContent, +} from "../../../services/refact/chatCommands"; +import { selectLspPort, selectApiKey } from "../../Config/configSlice"; +import { selectCurrentThreadId } from "./selectors"; + +function buildThreadParamsPatch( + thread: ChatThread, + isNewChat: boolean, +): Record<string, unknown> { + const patch: Record<string, unknown> = {}; + if (isNewChat) { + if (thread.tool_use) patch.tool_use = thread.tool_use; + if (thread.mode) patch.mode = thread.mode; + } + if (thread.model) patch.model = thread.model; + if (thread.boost_reasoning !== undefined) + patch.boost_reasoning = thread.boost_reasoning; + if (thread.include_project_info !== undefined) + patch.include_project_info = thread.include_project_info; + if (thread.context_tokens_cap !== undefined) + patch.context_tokens_cap = thread.context_tokens_cap; + if (thread.temperature != null) patch.temperature = thread.temperature; + if (thread.frequency_penalty != null) + patch.frequency_penalty = thread.frequency_penalty; + if (thread.max_tokens != null) patch.max_tokens = thread.max_tokens; + if (thread.reasoning_effort != null) + patch.reasoning_effort = thread.reasoning_effort; + if (thread.thinking_budget != null) + patch.thinking_budget = thread.thinking_budget; + if 
(thread.parallel_tool_calls != null) + patch.parallel_tool_calls = thread.parallel_tool_calls; + return patch; +} + +export { buildThreadParamsPatch }; + +function toMessageContent( + content: import("../../../services/refact/types").UserMessage["content"], +): MessageContent { + if (typeof content === "string") return content; + if (!Array.isArray(content)) return ""; + const out: ( + | { type: "text"; text: string } + | { type: "image_url"; image_url: { url: string } } + )[] = []; + for (const item of content) { + if ("type" in item && "text" in item && (item.type as string) === "text") { + out.push({ type: "text", text: item.text }); + } else if ("type" in item && "image_url" in item) { + out.push({ type: "image_url", image_url: item.image_url }); + } else if ("m_type" in item && "m_content" in item) { + const { m_type, m_content } = item; + if (m_type === "text") { + out.push({ type: "text", text: String(m_content) }); + } else if (String(m_type).startsWith("image/")) { + out.push({ + type: "image_url", + image_url: { url: `data:${m_type};base64,${String(m_content)}` }, + }); + } + } + } + return out.length ? out : ""; +} export const newChatAction = createAction<Partial<ChatThread> | undefined>( "chatThread/new", ); -export const newIntegrationChat = createAction<{ - integration: IntegrationMeta; - messages: ChatMessages; - request_attempt_id: string; -}>("chatThread/newIntegrationChat"); +export interface TaskMeta { + task_id: string; + role: string; + agent_id?: string; + card_id?: string; +} + +export const sendIdeMessagesToCurrentChat = createAsyncThunk( + "chatThread/sendIdeMessagesToCurrentChat", + async (arg: { messages: ChatMessages; priority?: boolean }, api) => { + const state = api.getState() as RootState; + const chatId = selectCurrentThreadId(state); + const port = selectLspPort(state); + const apiKey = selectApiKey(state) ?? undefined; + if (!chatId || !port) return; + + const runtime = state.chat.threads[chatId]; + if (!runtime) return; + + const isNewChat = runtime.thread.messages.length === 0; + const patch = buildThreadParamsPatch(runtime.thread, isNewChat); + + if (Object.keys(patch).length > 0) { + await sendChatCommand(chatId, port, apiKey, { + type: "set_params", + patch, + }); + } -export const chatResponse = createAction<PayloadWithId & ChatResponse>( - "chatThread/response", + for (const m of arg.messages) { + if (!isUserMessage(m)) continue; + const content = toMessageContent(m.content); + const empty = + typeof content === "string" + ? content.trim().length === 0 + : content.length === 0; + if (empty) continue; + await sendUserMessage(chatId, content, port, apiKey, arg.priority); + } + }, ); -export const chatTitleGenerationResponse = createAction< - PayloadWithId & ChatResponse ->("chatTitleGeneration/response"); +export const createChatWithId = createAction<{ + id: string; + title?: string; + isTaskChat?: boolean; + mode?: string; + taskMeta?: TaskMeta; + model?: string; + parentId?: string; + linkType?: string; +}>("chatThread/createWithId"); + +export const newChatWithInitialMessages = createAsyncThunk( + "chatThread/newChatWithInitialMessages", + async ( + arg: { title?: string; messages: ChatMessages; priority?: boolean }, + api, + ) => { + api.dispatch(newChatAction({ title: arg.title })); + const state = api.getState() as RootState; + const chatId = state.chat.current_thread_id; + const port = selectLspPort(state); + const apiKey = selectApiKey(state) ?? 
undefined; + if (!chatId || !port) return; + + const runtime = state.chat.threads[chatId]; + if (runtime && runtime.thread.messages.length === 0) { + const patch = buildThreadParamsPatch(runtime.thread, true); + if (Object.keys(patch).length > 0) { + await sendChatCommand(chatId, port, apiKey, { + type: "set_params", + patch, + }); + } + } -export const chatAskedQuestion = createAction<PayloadWithId>( - "chatThread/askQuestion", + for (const m of arg.messages) { + if (!isUserMessage(m)) continue; + const content = toMessageContent(m.content); + const empty = + typeof content === "string" + ? content.trim().length === 0 + : content.length === 0; + if (empty) continue; + await sendUserMessage(chatId, content, port, apiKey, arg.priority); + } + }, ); +export const newIntegrationChat = createAction<{ + integration: IntegrationMeta; + messages: ChatMessages; + request_attempt_id: string; +}>("chatThread/newIntegrationChat"); + export const setLastUserMessageId = createAction<PayloadWithChatAndMessageId>( "chatThread/setLastUserMessageId", ); -// TBD: only used when `/links` suggests a new chat. export const setIsNewChatSuggested = createAction<PayloadWithChatAndBoolean>( "chatThread/setIsNewChatSuggested", ); @@ -79,19 +210,13 @@ export const backUpMessages = createAction< } >("chatThread/backUpMessages"); -// TODO: add history actions to this, maybe not used any more -export const chatError = createAction<PayloadWithId & { message: string }>( - "chatThread/error", -); - -// TODO: include history actions with this one, this could be done by making it a thunk, or use reduce-reducers. -export const doneStreaming = createAction<PayloadWithId>( - "chatThread/doneStreaming", -); - export const setChatModel = createAction<string>("chatThread/setChatModel"); -export const getSelectedChatModel = (state: RootState) => - state.chat.thread.model; +export const getSelectedChatModel = (state: RootState) => { + const runtime = state.chat.threads[state.chat.current_thread_id] as + | { thread: { model: string } } + | undefined; + return runtime?.thread.model ?? 
""; +}; export const setSystemPrompt = createAction<SystemPrompts>( "chatThread/setSystemPrompt", @@ -105,6 +230,74 @@ export const restoreChat = createAction<ChatHistoryItem>( "chatThread/restoreChat", ); +export const updateOpenThread = createAction<{ + id: string; + thread: Partial<ChatThread>; +}>("chatThread/updateOpenThread"); + +export const updateChatRuntimeFromSessionState = createAction<{ + id: string; + session_state: + | "idle" + | "generating" + | "executing_tools" + | "paused" + | "waiting_ide" + | "waiting_user_input" + | "completed" + | "error"; + error?: string; +}>("chatThread/updateChatRuntimeFromSessionState"); + +export const switchToThread = createAction< + PayloadWithId & { openTab?: boolean } +>("chatThread/switchToThread"); + +export const closeThread = createAction<PayloadWithId & { force?: boolean }>( + "chatThread/closeThread", +); + +export const setThreadPauseReasons = createAction<{ + id: string; + pauseReasons: ToolConfirmationPauseReason[]; +}>("chatThread/setPauseReasons"); + +export const clearThreadPauseReasons = createAction<PayloadWithId>( + "chatThread/clearPauseReasons", +); + +export const setThreadConfirmationStatus = createAction<{ + id: string; + wasInteracted: boolean; + confirmationStatus: boolean; +}>("chatThread/setConfirmationStatus"); + +export const addThreadImage = createAction<{ id: string; image: ImageFile }>( + "chatThread/addImage", +); + +export const removeThreadImageByIndex = createAction<{ + id: string; + index: number; +}>("chatThread/removeImageByIndex"); + +export const resetThreadImages = createAction<PayloadWithId>( + "chatThread/resetImages", +); + +export const addThreadTextFile = createAction<{ id: string; file: TextFile }>( + "chatThread/addTextFile", +); + +export const removeThreadTextFileByIndex = createAction<{ + id: string; + index: number; +}>("chatThread/removeTextFileByIndex"); + +export const resetThreadTextFiles = createAction<PayloadWithId>( + "chatThread/resetTextFiles", +); + export const clearChatError = createAction<PayloadWithId>( "chatThread/clearError", ); @@ -116,16 +309,20 @@ export const setPreventSend = createAction<PayloadWithId>( export const setAreFollowUpsEnabled = createAction<boolean>( "chat/setAreFollowUpsEnabled", ); -export const setIsTitleGenerationEnabled = createAction<boolean>( - "chat/setIsTitleGenerationEnabled", -); - -export const setUseCompression = createAction<boolean>( - "chat/setUseCompression", -); export const setToolUse = createAction<ToolUse>("chatThread/setToolUse"); +export const setThreadMode = createAction<{ + chatId: string; + mode: string; + threadDefaults?: { + include_project_info?: boolean; + checkpoints_enabled?: boolean; + auto_approve_editing_tools?: boolean; + auto_approve_dangerous_commands?: boolean; + }; +}>("chatThread/setThreadMode"); + export const setEnabledCheckpoints = createAction<boolean>( "chat/setEnabledCheckpoints", ); @@ -134,9 +331,15 @@ export const setBoostReasoning = createAction<PayloadWithChatAndBoolean>( "chatThread/setBoostReasoning", ); -export const setAutomaticPatch = createAction<PayloadWithChatAndBoolean>( - "chatThread/setAutomaticPatch", -); +export const setAutoApproveEditingTools = + createAction<PayloadWithChatAndBoolean>( + "chatThread/setAutoApproveEditingTools", + ); + +export const setAutoApproveDangerousCommands = + createAction<PayloadWithChatAndBoolean>( + "chatThread/setAutoApproveDangerousCommands", + ); export const saveTitle = createAction<PayloadWithIdAndTitle>( "chatThread/saveTitle", @@ -146,35 +349,17 @@ export 
const setSendImmediately = createAction<boolean>( "chatThread/setSendImmediately", ); -export type EnqueueUserMessagePayload = { - id: string; - message: import("../../../services/refact/types").UserMessage; - createdAt: number; -}; - -export const enqueueUserMessage = createAction< - EnqueueUserMessagePayload & { priority?: boolean } ->("chatThread/enqueueUserMessage"); - -export const dequeueUserMessage = createAction<{ queuedId: string }>( - "chatThread/dequeueUserMessage", -); - -export const clearQueuedMessages = createAction( - "chatThread/clearQueuedMessages", -); - export const setChatMode = createAction<LspChatMode>("chatThread/setChatMode"); export const setIntegrationData = createAction<Partial<IntegrationMeta> | null>( "chatThread/setIntegrationData", ); -export const setIsWaitingForResponse = createAction<boolean>( - "chatThread/setIsWaiting", -); +export const setIsWaitingForResponse = createAction<{ + id: string; + value: boolean; +}>("chatThread/setIsWaiting"); -// TBD: maybe remove it's only used by a smart link. export const setMaxNewTokens = createAction<number>( "chatThread/setMaxNewTokens", ); @@ -199,278 +384,100 @@ export const setContextTokensCap = createAction<PayloadWithChatAndNumber>( "chatThread/setContextTokensCap", ); -// TODO: This is the circular dep when imported from hooks :/ -const createAppAsyncThunk = createAsyncThunk.withTypes<{ - state: RootState; - dispatch: AppDispatch; -}>(); - -export const chatGenerateTitleThunk = createAppAsyncThunk< - unknown, - { - messages: ChatMessages; - chatId: string; +export const setReasoningEffort = createAction<{ + chatId: string; + value: + | "none" + | "minimal" + | "low" + | "medium" + | "high" + | "xhigh" + | "max" + | null; +}>("chatThread/setReasoningEffort"); + +export const setThinkingBudget = createAction<{ + chatId: string; + value: number | null; +}>("chatThread/setThinkingBudget"); + +export const setTemperature = createAction<{ + chatId: string; + value: number | null; +}>("chatThread/setTemperature"); + +export const setFrequencyPenalty = createAction<{ + chatId: string; + value: number | null; +}>("chatThread/setFrequencyPenalty"); + +export const setMaxTokens = createAction<{ + chatId: string; + value: number | null; +}>("chatThread/setMaxTokens"); + +export const setParallelToolCalls = createAction<{ + chatId: string; + value: boolean | null; +}>("chatThread/setParallelToolCalls"); + +export const restoreChatFromBackend = createAsyncThunk< + undefined, + { id: string; fallback: ChatHistoryItem }, + { dispatch: AppDispatch; state: RootState } +>("chatThread/restoreChatFromBackend", async ({ id, fallback }, thunkApi) => { + try { + const result = await thunkApi + .dispatch( + trajectoriesApi.endpoints.getTrajectory.initiate(id, { + forceRefetch: true, + }), + ) + .unwrap(); + + const thread = trajectoryDataToChatThread(result); + const historyItem: ChatHistoryItem = { + ...thread, + createdAt: result.created_at, + updatedAt: result.updated_at, + title: result.title, + isTitleGenerated: result.isTitleGenerated, + }; + + thunkApi.dispatch(restoreChat(historyItem)); + } catch { + thunkApi.dispatch(restoreChat(fallback)); } ->("chatThread/generateTitle", async ({ messages, chatId }, thunkAPI) => { - const state = thunkAPI.getState(); - - const messagesToSend = messages.filter( - (msg) => - !isToolMessage(msg) && !isAssistantMessage(msg) && msg.content !== "", - ); - // .map((msg) => { - // if (isAssistantMessage(msg)) { - // return { - // role: msg.role, - // content: msg.content, - // }; - // } - // return 
msg; - // }); - - const caps = await thunkAPI - .dispatch(capsApi.endpoints.getCaps.initiate(undefined)) - .unwrap(); - const model = caps.chat_default_model; - const messagesForLsp = formatMessagesForLsp([ - ...messagesToSend, - { - role: "user", - content: - "Summarize the chat above in 2-3 words. Prefer filenames, classes, entities, and avoid generic terms. Example: 'Explain MyClass::f()'. Write nothing else, only the 2-3 words.", - checkpoints: [], - }, - ]); - - const chatResponseChunks: ChatResponse[] = []; - - return generateChatTitle({ - messages: messagesForLsp, - model, - stream: true, - abortSignal: thunkAPI.signal, - chatId, - apiKey: state.config.apiKey, - port: state.config.lspPort, - }) - .then((response) => { - if (!response.ok) { - return Promise.reject(new Error(response.statusText)); - } - const reader = response.body?.getReader(); - if (!reader) return; - const onAbort = () => thunkAPI.dispatch(setPreventSend({ id: chatId })); - const onChunk = (json: Record<string, unknown>) => { - chatResponseChunks.push(json as ChatResponse); - }; - return consumeStream(reader, thunkAPI.signal, onAbort, onChunk); - }) - .catch((err: Error) => { - thunkAPI.dispatch(doneStreaming({ id: chatId })); - thunkAPI.dispatch(chatError({ id: chatId, message: err.message })); - return thunkAPI.rejectWithValue(err.message); - }) - .finally(() => { - const title = chatResponseChunks.reduce<string>((acc, chunk) => { - if (isChatResponseChoice(chunk)) { - if (isAssistantDelta(chunk.choices[0].delta)) { - const deltaContent = chunk.choices[0].delta.content; - if (deltaContent) { - return acc + deltaContent; - } - } - } - return acc; - }, ""); - - thunkAPI.dispatch( - saveTitle({ id: chatId, title, isTitleGenerated: true }), - ); - thunkAPI.dispatch(doneStreaming({ id: chatId })); - }); + return undefined; }); -function checkForToolLoop(message: ChatMessages): boolean { - const assistantOrToolMessages = takeFromEndWhile(message, (message) => { - return ( - isToolMessage(message) || - isToolCallMessage(message) || - isCDInstructionMessage(message) - ); - }); - - if (assistantOrToolMessages.length === 0) return false; - - const toolCalls = assistantOrToolMessages.reduce<ToolCall[]>((acc, cur) => { - if (!isToolCallMessage(cur)) return acc; - return acc.concat(cur.tool_calls); - }, []); - - if (toolCalls.length === 0) return false; - - const toolResults = assistantOrToolMessages.filter(isToolMessage); - - const hasDuplicates = scanFoDuplicatesWith(toolCalls, (a, b) => { - const aResult: ToolMessage | undefined = toolResults.find( - (message) => message.content.tool_call_id === a.id, - ); - - const bResult: ToolMessage | undefined = toolResults.find( - (message) => message.content.tool_call_id === b.id, - ); - - return ( - a.function.name === b.function.name && - a.function.arguments === b.function.arguments && - !!aResult && - !!bResult && - aResult.content.content === bResult.content.content - ); - }); - - return hasDuplicates; -} -// TODO: add props for config chat - -export const chatAskQuestionThunk = createAppAsyncThunk< - unknown, - { - messages: ChatMessages; - chatId: string; - checkpointsEnabled?: boolean; - mode?: LspChatMode; // used once for actions - // TODO: make a separate function for this... and it'll need to be saved. - } ->( - "chatThread/sendChat", - ({ messages, chatId, mode, checkpointsEnabled }, thunkAPI) => { - const state = thunkAPI.getState(); - - const thread = - chatId in state.chat.cache - ? state.chat.cache[chatId] - : state.chat.thread.id === chatId - ? 
state.chat.thread - : null; - - // stops the stream - const onlyDeterministicMessages = checkForToolLoop(messages); - - const messagesForLsp = formatMessagesForLsp(messages); - const realMode = mode ?? thread?.mode; - const maybeLastUserMessageId = thread?.last_user_message_id; - const boostReasoning = thread?.boost_reasoning ?? false; - const increaseMaxTokens = thread?.increase_max_tokens ?? false; - // Only send include_project_info on the first message of a chat - // Check if there's only one user message (the current one being sent) - const userMessageCount = messages.filter(isUserMessage).length; - const includeProjectInfo = - userMessageCount <= 1 ? thread?.include_project_info ?? true : undefined; - - // Context tokens cap - send on every request, default to max if not set - const contextTokensCap = - thread?.context_tokens_cap ?? thread?.currentMaximumContextTokens; - - // Use compression - get from state - const useCompression = state.chat.use_compression; - - return sendChat({ - messages: messagesForLsp, - last_user_message_id: maybeLastUserMessageId, - model: state.chat.thread.model, - stream: true, - abortSignal: thunkAPI.signal, - increase_max_tokens: increaseMaxTokens, - chatId, - apiKey: state.config.apiKey, - port: state.config.lspPort, - onlyDeterministicMessages, - checkpointsEnabled, - integration: thread?.integration, - mode: realMode, - boost_reasoning: boostReasoning, - include_project_info: includeProjectInfo, - context_tokens_cap: contextTokensCap, - use_compression: useCompression, - }) - .then(async (response) => { - if (!response.ok) { - const responseData = (await response.json()) as unknown; - return Promise.reject(responseData); - } - const reader = response.body?.getReader(); - if (!reader) return; - const onAbort = () => { - thunkAPI.dispatch(setPreventSend({ id: chatId })); - thunkAPI.dispatch(fixBrokenToolMessages({ id: chatId })); - }; - const onChunk = (json: Record<string, unknown>) => { - const action = chatResponse({ - ...(json as ChatResponse), - id: chatId, - }); - return thunkAPI.dispatch(action); - }; - return consumeStream(reader, thunkAPI.signal, onAbort, onChunk); - }) - .catch((err: unknown) => { - // console.log("Catch called"); - const isError = err instanceof Error; - thunkAPI.dispatch(doneStreaming({ id: chatId })); - thunkAPI.dispatch(fixBrokenToolMessages({ id: chatId })); - - const errorObject: DetailMessageWithErrorType = { - detail: isError - ? err.message - : isDetailMessage(err) - ? err.detail - : (err as string), - errorType: isError ? 
"CHAT" : "GLOBAL", - }; - - return thunkAPI.rejectWithValue(errorObject); - }) - .finally(() => { - thunkAPI.dispatch(doneStreaming({ id: chatId })); - }); - }, +import type { ChatEventEnvelope } from "../../../services/refact/chatSubscription"; + +export const applyChatEvent = createAction<ChatEventEnvelope>( + "chatThread/applyChatEvent", ); -export const sendCurrentChatToLspAfterToolCallUpdate = createAppAsyncThunk< - unknown, - { chatId: string; toolCallId: string } ->( - "chatThread/sendCurrentChatToLspAfterToolCallUpdate", - async ({ chatId, toolCallId }, thunkApi) => { - const state = thunkApi.getState(); - if (state.chat.thread.id !== chatId) return; - if ( - state.chat.streaming || - state.chat.prevent_send || - state.chat.waiting_for_response - ) { - return; - } - const lastMessages = takeFromEndWhile( - state.chat.thread.messages, - (message) => !isUserMessage(message) && !isAssistantMessage(message), - ); - - const toolUseInThisSet = lastMessages.some( - (message) => - isToolMessage(message) && message.content.tool_call_id === toolCallId, - ); - - if (!toolUseInThisSet) return; - thunkApi.dispatch(setIsWaitingForResponse(true)); - - return thunkApi.dispatch( - chatAskQuestionThunk({ - messages: state.chat.thread.messages, - chatId, - mode: state.chat.thread.mode, - checkpointsEnabled: state.chat.checkpoints_enabled, - }), - ); - }, +export type IdeToolRequiredPayload = { + chatId: string; + toolCallId: string; + toolName: string; + args: unknown; +}; + +export const ideToolRequired = createAction<IdeToolRequiredPayload>( + "chatThread/ideToolRequired", ); + +export const requestSseRefresh = createAction<{ chatId: string }>( + "chatThread/requestSseRefresh", +); + +export const clearSseRefreshRequest = createAction( + "chatThread/clearSseRefreshRequest", +); + +export const setTaskWidgetExpanded = createAction<{ + id: string; + expanded: boolean; +}>("chatThread/setTaskWidgetExpanded"); diff --git a/refact-agent/gui/src/features/Chat/Thread/reducer.edge-cases.test.ts b/refact-agent/gui/src/features/Chat/Thread/reducer.edge-cases.test.ts new file mode 100644 index 000000000..9d69fc775 --- /dev/null +++ b/refact-agent/gui/src/features/Chat/Thread/reducer.edge-cases.test.ts @@ -0,0 +1,490 @@ +/* eslint-disable @typescript-eslint/no-explicit-any, @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-non-null-assertion */ +import { expect, test, describe, beforeEach } from "vitest"; +import { chatReducer } from "./reducer"; +import type { Chat } from "./types"; +import { newChatAction, applyChatEvent } from "./actions"; +import type { ChatEventEnvelope } from "../../../services/refact/chatSubscription"; +import type { ChatMessage } from "../../../services/refact/types"; + +describe("Chat Thread Reducer - Edge Cases", () => { + let initialState: Chat; + let chatId: string; + + beforeEach(() => { + const emptyState = chatReducer(undefined, { type: "@@INIT" }); + initialState = chatReducer(emptyState, newChatAction(undefined)); + chatId = initialState.current_thread_id; + }); + + const createSnapshot = (messages: ChatMessage[] = []): ChatEventEnvelope => ({ + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + 
messages, + }); + + describe("preserve streaming fields on final message_added", () => { + test("should keep reasoning_content from streaming when message_added arrives", () => { + let state = chatReducer( + initialState, + applyChatEvent(createSnapshot([{ role: "user", content: "Hello" }])), + ); + + const streamStart: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-123", + }; + state = chatReducer(state, applyChatEvent(streamStart)); + + const deltaWithReasoning: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-123", + ops: [ + { op: "append_reasoning", text: "Let me think about this..." }, + { op: "append_content", text: "Here is my answer" }, + ], + }; + state = chatReducer(state, applyChatEvent(deltaWithReasoning)); + + const messageAdded: ChatEventEnvelope = { + chat_id: chatId, + seq: "4", + type: "message_added", + message: { + message_id: "msg-123", + role: "assistant", + content: "Here is my answer", + }, + index: 1, + }; + state = chatReducer(state, applyChatEvent(messageAdded)); + + const runtime = state.threads[chatId]!; + const assistantMsg = runtime.thread.messages[1]; + + expect(assistantMsg.role).toBe("assistant"); + expect(assistantMsg.content).toBe("Here is my answer"); + if (assistantMsg.role === "assistant") { + expect(assistantMsg.reasoning_content).toBe( + "Let me think about this...", + ); + } + }); + + test("should keep thinking_blocks from streaming when message_added arrives", () => { + let state = chatReducer( + initialState, + applyChatEvent(createSnapshot([{ role: "user", content: "Hello" }])), + ); + + const streamStart: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-456", + }; + state = chatReducer(state, applyChatEvent(streamStart)); + + const deltaWithThinking: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-456", + ops: [ + { + op: "set_thinking_blocks", + blocks: [{ type: "thinking", thinking: "Deep thought" }], + }, + { op: "append_content", text: "Answer" }, + ], + }; + state = chatReducer(state, applyChatEvent(deltaWithThinking)); + + const messageAdded: ChatEventEnvelope = { + chat_id: chatId, + seq: "4", + type: "message_added", + message: { + message_id: "msg-456", + role: "assistant", + content: "Answer", + }, + index: 1, + }; + state = chatReducer(state, applyChatEvent(messageAdded)); + + const runtime = state.threads[chatId]!; + const assistantMsg = runtime.thread.messages[1]; + + expect(assistantMsg.role).toBe("assistant"); + if (assistantMsg.role === "assistant") { + expect(assistantMsg.thinking_blocks).toBeDefined(); + expect(assistantMsg.thinking_blocks?.length).toBe(1); + } + }); + + test("should keep usage from streaming when message_added arrives", () => { + let state = chatReducer( + initialState, + applyChatEvent(createSnapshot([{ role: "user", content: "Hello" }])), + ); + + const streamStart: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-789", + }; + state = chatReducer(state, applyChatEvent(streamStart)); + + const deltaWithUsage: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-789", + ops: [ + { op: "append_content", text: "Response" }, + { + op: "set_usage", + usage: { + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150, + }, + }, + ], + }; + state = chatReducer(state, applyChatEvent(deltaWithUsage)); + + const messageAdded: 
ChatEventEnvelope = { + chat_id: chatId, + seq: "4", + type: "message_added", + message: { + message_id: "msg-789", + role: "assistant", + content: "Response", + }, + index: 1, + }; + state = chatReducer(state, applyChatEvent(messageAdded)); + + const runtime = state.threads[chatId]!; + const assistantMsg = runtime.thread.messages[1]; + + expect(assistantMsg.role).toBe("assistant"); + if (assistantMsg.role === "assistant") { + expect(assistantMsg.usage).toBeDefined(); + expect(assistantMsg.usage?.prompt_tokens).toBe(100); + } + }); + }); + + describe("empty snapshot handling", () => { + test("should accept empty snapshot as source of truth (backend may clear/truncate)", () => { + let state = chatReducer( + initialState, + applyChatEvent( + createSnapshot([ + { role: "user", content: "Hello" }, + { role: "assistant", content: "Hi there!" }, + ]), + ), + ); + + const runtime1 = state.threads[chatId]!; + expect(runtime1.thread.messages).toHaveLength(2); + + const emptySnapshot: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [], + }; + + state = chatReducer(state, applyChatEvent(emptySnapshot)); + const runtime2 = state.threads[chatId]!; + + // Empty snapshots are accepted as truth to prevent permanent desync + expect(runtime2.thread.messages).toHaveLength(0); + }); + + test("should update thread params even with empty snapshot", () => { + let state = chatReducer( + initialState, + applyChatEvent(createSnapshot([{ role: "user", content: "Hello" }])), + ); + + const emptySnapshot: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "snapshot", + thread: { + id: chatId, + title: "Updated Title", + model: "gpt-4o", + mode: "explore", + tool_use: "explore", + boost_reasoning: true, + context_tokens_cap: 4096, + include_project_info: false, + checkpoints_enabled: false, + is_title_generated: true, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 1, + pause_reasons: [], + queued_items: [], + }, + messages: [], + }; + + state = chatReducer(state, applyChatEvent(emptySnapshot)); + const runtime = state.threads[chatId]!; + + // Empty snapshots clear messages (backend is source of truth) + expect(runtime.thread.messages).toHaveLength(0); + // But thread params are updated + expect(runtime.thread.title).toBe("Updated Title"); + expect(runtime.thread.model).toBe("gpt-4o"); + expect(runtime.thread.mode).toBe("explore"); + }); + }); + + describe("merge_extra safety", () => { + test("should merge extra fields incrementally", () => { + let state = chatReducer( + initialState, + applyChatEvent(createSnapshot([{ role: "user", content: "Hello" }])), + ); + + const streamStart: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-extra", + }; + state = chatReducer(state, applyChatEvent(streamStart)); + + const delta1: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-extra", + ops: [{ op: "merge_extra", extra: { metering_a: 100 } }], + }; + state = chatReducer(state, applyChatEvent(delta1)); + + const delta2: ChatEventEnvelope = { + chat_id: chatId, + seq: "4", + type: 
"stream_delta", + message_id: "msg-extra", + ops: [{ op: "merge_extra", extra: { metering_b: 200 } }], + }; + state = chatReducer(state, applyChatEvent(delta2)); + + const delta3: ChatEventEnvelope = { + chat_id: chatId, + seq: "5", + type: "stream_delta", + message_id: "msg-extra", + ops: [{ op: "merge_extra", extra: { metering_a: 150 } }], + }; + state = chatReducer(state, applyChatEvent(delta3)); + + const runtime = state.threads[chatId]!; + const msg = runtime.thread.messages.find( + (m) => m.message_id === "msg-extra", + ) as Record<string, unknown> | undefined; + + expect((msg?.extra as any)?.metering_a).toBe(150); + expect((msg?.extra as any)?.metering_b).toBe(200); + }); + }); + + describe("abort event sequence", () => { + test("should handle stream_finished with abort reason", () => { + let state = chatReducer( + initialState, + applyChatEvent(createSnapshot([{ role: "user", content: "Hello" }])), + ); + + const streamStart: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-abort", + }; + state = chatReducer(state, applyChatEvent(streamStart)); + + expect(state.threads[chatId]!.streaming).toBe(true); + + const streamFinished: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "stream_finished", + message_id: "msg-abort", + finish_reason: "abort", + }; + state = chatReducer(state, applyChatEvent(streamFinished)); + + const messageRemoved: ChatEventEnvelope = { + chat_id: chatId, + seq: "4", + type: "message_removed", + message_id: "msg-abort", + }; + state = chatReducer(state, applyChatEvent(messageRemoved)); + + // Note: runtime state (streaming, etc.) is now controlled by sidebar SSE session_state updates + // stream_finished sets streaming to false + const runtime = state.threads[chatId]!; + expect(runtime.streaming).toBe(false); + expect(runtime.thread.messages).toHaveLength(1); + expect(runtime.thread.messages[0].role).toBe("user"); + }); + }); + + describe("pause lifecycle events", () => { + test("should handle pause_required event", () => { + let state = chatReducer( + initialState, + applyChatEvent( + createSnapshot([{ role: "user", content: "Run shell command" }]), + ), + ); + + const pauseRequired: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "pause_required", + reasons: [ + { + type: "confirmation", + tool_name: "shell", + command: "shell", + rule: "deny_all", + tool_call_id: "tc-1", + integr_config_path: null, + }, + ], + }; + state = chatReducer(state, applyChatEvent(pauseRequired)); + + const runtime = state.threads[chatId]!; + expect(runtime.confirmation.pause).toBe(true); + expect(runtime.confirmation.pause_reasons).toHaveLength(1); + }); + + test("should handle pause_cleared event", () => { + let state = chatReducer(initialState, applyChatEvent(createSnapshot([]))); + + const pauseRequired: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "pause_required", + reasons: [ + { + type: "confirmation", + tool_name: "shell", + command: "shell", + rule: "deny_all", + tool_call_id: "tc-1", + integr_config_path: null, + }, + ], + }; + state = chatReducer(state, applyChatEvent(pauseRequired)); + expect(state.threads[chatId]!.confirmation.pause).toBe(true); + + const pauseCleared: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "pause_cleared", + }; + state = chatReducer(state, applyChatEvent(pauseCleared)); + + expect(state.threads[chatId]!.confirmation.pause).toBe(false); + expect(state.threads[chatId]!.confirmation.pause_reasons).toHaveLength(0); + }); + }); + + 
describe("error state handling", () => { + test("should handle error without content (message_removed path)", () => { + let state = chatReducer( + initialState, + applyChatEvent(createSnapshot([{ role: "user", content: "Hello" }])), + ); + + const streamStart: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-error", + }; + state = chatReducer(state, applyChatEvent(streamStart)); + + const messageRemoved: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "message_removed", + message_id: "msg-error", + }; + state = chatReducer(state, applyChatEvent(messageRemoved)); + + // Note: error state is now controlled by sidebar SSE session_state updates + // Here we just verify the message was removed correctly + const runtime = state.threads[chatId]!; + expect(runtime.thread.messages).toHaveLength(1); + expect(runtime.thread.messages[0].role).toBe("user"); + }); + }); +}); diff --git a/refact-agent/gui/src/features/Chat/Thread/reducer.test.ts b/refact-agent/gui/src/features/Chat/Thread/reducer.test.ts index c56c3a80d..760730c69 100644 --- a/refact-agent/gui/src/features/Chat/Thread/reducer.test.ts +++ b/refact-agent/gui/src/features/Chat/Thread/reducer.test.ts @@ -1,19 +1,1531 @@ -import { expect, test, describe } from "vitest"; +/* eslint-disable @typescript-eslint/no-non-null-assertion */ +import { expect, test, describe, beforeEach } from "vitest"; import { chatReducer } from "./reducer"; -import { chatResponse } from "./actions"; -import { createAction } from "@reduxjs/toolkit"; +import type { Chat } from "./types"; +import { newChatAction, applyChatEvent } from "./actions"; +import type { + ChatEventEnvelope, + DeltaOp, +} from "../../../services/refact/chatSubscription"; -describe("Chat Thread Reducer", () => { - test("streaming should be true on any response", () => { - const init = chatReducer(undefined, createAction("noop")()); - const msg = chatResponse({ - id: init.thread.id, - role: "tool", - tool_call_id: "test_tool", - content: "👀", +describe("Chat Thread Reducer - Event-based (Stateless Trajectory UI)", () => { + let initialState: Chat; + let chatId: string; + + beforeEach(() => { + const emptyState = chatReducer(undefined, { type: "@@INIT" }); + initialState = chatReducer(emptyState, newChatAction(undefined)); + chatId = initialState.current_thread_id; + }); + + describe("applyChatEvent - snapshot", () => { + test("should initialize thread from snapshot event", () => { + const event: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test Chat", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: 8192, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "Hello" }, + { role: "assistant", content: "Hi there!" 
},
+        ],
+      };
+
+      const result = chatReducer(initialState, applyChatEvent(event));
+      const runtime = result.threads[chatId]!;
+
+      expect(runtime).toBeDefined();
+      expect(runtime.thread.title).toBe("Test Chat");
+      expect(runtime.thread.model).toBe("gpt-4");
+      expect(runtime.thread.messages).toHaveLength(2);
+      expect(runtime.streaming).toBe(false);
+      expect(runtime.waiting_for_response).toBe(false);
+    });
+
+    test("should handle snapshot with generating state", () => {
+      const event: ChatEventEnvelope = {
+        chat_id: chatId,
+        seq: "1",
+        type: "snapshot",
+        thread: {
+          id: chatId,
+          title: "Test",
+          model: "gpt-4",
+          mode: "AGENT",
+          tool_use: "agent",
+          boost_reasoning: false,
+          context_tokens_cap: null,
+          include_project_info: true,
+          checkpoints_enabled: true,
+          is_title_generated: false,
+        },
+        runtime: {
+          state: "generating",
+          paused: false,
+          error: null,
+          queue_size: 0,
+          pause_reasons: [],
+          queued_items: [],
+        },
+        messages: [],
+      };
+
+      const result = chatReducer(initialState, applyChatEvent(event));
+      const runtime = result.threads[chatId]!;
+
+      expect(runtime.streaming).toBe(true);
+      expect(runtime.waiting_for_response).toBe(true);
+    });
+
+    test("should handle snapshot with paused state", () => {
+      const event: ChatEventEnvelope = {
+        chat_id: chatId,
+        seq: "1",
+        type: "snapshot",
+        thread: {
+          id: chatId,
+          title: "Test",
+          model: "gpt-4",
+          mode: "AGENT",
+          tool_use: "agent",
+          boost_reasoning: false,
+          context_tokens_cap: null,
+          include_project_info: true,
+          checkpoints_enabled: true,
+          is_title_generated: false,
+        },
+        runtime: {
+          state: "idle",
+          paused: true,
+          error: null,
+          queue_size: 0,
+          pause_reasons: [],
+          queued_items: [],
+        },
+        messages: [],
+      };
+
+      const result = chatReducer(initialState, applyChatEvent(event));
+      const runtime = result.threads[chatId]!;
+
+      expect(runtime.confirmation.pause).toBe(true);
+    });
+
+    test("should handle snapshot with error state", () => {
+      const event: ChatEventEnvelope = {
+        chat_id: chatId,
+        seq: "1",
+        type: "snapshot",
+        thread: {
+          id: chatId,
+          title: "Test",
+          model: "gpt-4",
+          mode: "AGENT",
+          tool_use: "agent",
+          boost_reasoning: false,
+          context_tokens_cap: null,
+          include_project_info: true,
+          checkpoints_enabled: true,
+          is_title_generated: false,
+        },
+        runtime: {
+          state: "error", // "error" state surfaces the error message; prevent_send stays false (see assertion below)
+          paused: false,
+          error: "Something went wrong",
+          queue_size: 0,
+          pause_reasons: [],
+          queued_items: [],
+        },
+        messages: [],
+      };
+
+      const result = chatReducer(initialState, applyChatEvent(event));
+      const runtime = result.threads[chatId]!;
+
+      expect(runtime.error).toBe("Something went wrong");
+      // Allow sending even on error for recovery
+      expect(runtime.prevent_send).toBe(false);
+    });
+  });
+
+  describe("applyChatEvent - stream_delta", () => {
+    test("should append content via delta ops", () => {
+      // First set up a thread with an assistant message that has a message_id
+      const snapshotEvent: ChatEventEnvelope = {
+        chat_id: chatId,
+        seq: "1",
+        type: "snapshot",
+        thread: {
+          id: chatId,
+          title: "Test",
+          model: "gpt-4",
+          mode: "AGENT",
+          tool_use: "agent",
+          boost_reasoning: false,
+          context_tokens_cap: null,
+          include_project_info: true,
+          checkpoints_enabled: true,
+          is_title_generated: false,
+        },
+        runtime: {
+          state: "generating",
+          paused: false,
+          error: null,
+          queue_size: 0,
+          pause_reasons: [],
+          queued_items: [],
+        },
+        messages: [{ role: "user", content: "Hello" }],
+      };
+
+      let state = chatReducer(initialState, applyChatEvent(snapshotEvent));
+
+      // Use
stream_started to add assistant message with message_id + const streamStartEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-1", + }; + + state = chatReducer(state, applyChatEvent(streamStartEvent)); + + // Now apply a delta + const deltaEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "append_content", text: "Hi there!" }], + }; + + state = chatReducer(state, applyChatEvent(deltaEvent)); + const runtime = state.threads[chatId]!; + const lastMessage = + runtime.thread.messages[runtime.thread.messages.length - 1]; + + expect(lastMessage.content).toBe("Hi there!"); + }); + + test("should handle reasoning content delta", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: true, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Explain" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + // Use stream_started to add assistant message + const streamStartEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-1", + }; + + state = chatReducer(state, applyChatEvent(streamStartEvent)); + + const deltaEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "append_reasoning", text: "Let me think about this..." }], + }; + + state = chatReducer(state, applyChatEvent(deltaEvent)); + const runtime = state.threads[chatId]!; + const lastMessage = + runtime.thread.messages[runtime.thread.messages.length - 1]; + + expect(lastMessage).toHaveProperty( + "reasoning_content", + "Let me think about this...", + ); + }); + }); + + describe("applyChatEvent - message_added", () => { + test("should add message at index", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const addEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "message_added", + message: { role: "assistant", content: "Hi!" 
}, + index: 1, + }; + + state = chatReducer(state, applyChatEvent(addEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.thread.messages).toHaveLength(2); + expect(runtime.thread.messages[1].content).toBe("Hi!"); + }); + + test("should replace existing message with same message_id (deduplication)", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + // First, stream_started adds a placeholder with message_id + const streamStartEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-123", + }; + state = chatReducer(state, applyChatEvent(streamStartEvent)); + + // Add some streaming content + const deltaEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-123", + ops: [{ op: "append_content", text: "Streaming content..." }], + }; + state = chatReducer(state, applyChatEvent(deltaEvent)); + + // Now message_added comes with the same message_id - should REPLACE, not duplicate + const addEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "4", + type: "message_added", + message: { + role: "assistant", + content: "Final complete content", + message_id: "msg-123", + }, + index: 1, + }; + + state = chatReducer(state, applyChatEvent(addEvent)); + const runtime = state.threads[chatId]!; + + // Should still have only 2 messages (user + assistant), not 3 + expect(runtime.thread.messages).toHaveLength(2); + // Content should be the final version, not streaming version + expect(runtime.thread.messages[1].content).toBe("Final complete content"); + }); + + test("should preserve server fields and update seq when replacing assistant message", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-assistant-1", + }), + ); + + const deltaOps: DeltaOp[] = [ + { + op: "set_tool_calls", + tool_calls: [ + { id: "call-1", function: { name: "web", arguments: "{}" } }, + ], + }, + { + op: "add_server_content_block", + block: { type: "text", text: "server block" }, + }, + { + op: "merge_extra", + extra: { metering_balance: 42 }, + }, + { + op: "append_reasoning", + text: "stream reasoning", + }, + ]; + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-assistant-1", + ops: deltaOps, + }), + ); + + state = 
chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "4", + type: "message_added", + message: { + role: "assistant", + content: "Final", + message_id: "msg-assistant-1", + }, + index: 1, + }), + ); + + const runtime = state.threads[chatId]!; + const assistant = runtime.thread.messages[1]; + if (assistant.role !== "assistant") { + throw new Error("Expected assistant message"); + } + expect(assistant.tool_calls).toHaveLength(1); + expect(assistant.server_content_blocks).toHaveLength(1); + expect(assistant.extra).toEqual({ metering_balance: 42 }); + expect(assistant.reasoning_content).toBe("stream reasoning"); + expect(runtime.last_applied_seq).toBe("4"); + + const replayed = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "4", + type: "message_added", + message: { + role: "assistant", + content: "Should be ignored", + message_id: "msg-assistant-1", + }, + index: 1, + }), + ); + + const replayedAssistant = replayed.threads[chatId]!.thread.messages[1]; + expect(replayedAssistant.content).toBe("Final"); + }); + + test("should clamp negative message_added index to start", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "Existing-1", message_id: "m1" }, + { role: "assistant", content: "Existing-2", message_id: "m2" }, + ], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "message_added", + message: { + role: "user", + content: "Inserted at start", + message_id: "m-new", + }, + index: -5, + }), + ); + + const runtime = state.threads[chatId]!; + expect(runtime.thread.messages[0].content).toBe("Inserted at start"); + expect(runtime.thread.messages).toHaveLength(3); + }); + }); + + describe("applyChatEvent - pause_required", () => { + test("should set pause state and reasons", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const pauseEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "pause_required", + reasons: [ + { + type: "confirmation", + tool_name: "shell", + command: "shell rm -rf /", + rule: "dangerous_command", + tool_call_id: "call_123", + integr_config_path: null, + }, + ], + }; + + state = chatReducer(state, applyChatEvent(pauseEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.confirmation.pause).toBe(true); + expect(runtime.confirmation.pause_reasons).toHaveLength(1); + expect(runtime.confirmation.pause_reasons[0].tool_call_id).toBe( + "call_123", + ); + // Note: streaming state is controlled by sidebar SSE 
session_state updates + }); + }); + + describe("applyChatEvent - message_updated", () => { + test("should update message content by message_id", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "Original", message_id: "msg-user-1" }, + ], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const updateEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "message_updated", + message_id: "msg-user-1", + message: { + role: "user", + content: "Updated content", + message_id: "msg-user-1", + }, + }; + + state = chatReducer(state, applyChatEvent(updateEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.thread.messages).toHaveLength(1); + expect(runtime.thread.messages[0].content).toBe("Updated content"); }); - const result = chatReducer(init, msg); - expect(result.streaming).toEqual(true); + test("should not affect other messages when updating", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "First", message_id: "msg-1" }, + { role: "assistant", content: "Response", message_id: "msg-2" }, + { role: "user", content: "Second", message_id: "msg-3" }, + ], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const updateEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "message_updated", + message_id: "msg-2", + message: { + role: "assistant", + content: "Updated response", + message_id: "msg-2", + }, + }; + + state = chatReducer(state, applyChatEvent(updateEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.thread.messages).toHaveLength(3); + expect(runtime.thread.messages[0].content).toBe("First"); + expect(runtime.thread.messages[1].content).toBe("Updated response"); + expect(runtime.thread.messages[2].content).toBe("Second"); + }); + }); + + describe("applyChatEvent - message_removed", () => { + test("should remove message by message_id", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "Hello", message_id: "msg-1" }, + { role: "assistant", content: "Hi", message_id: "msg-2" }, + ], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const removeEvent: 
ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "message_removed", + message_id: "msg-2", + }; + + state = chatReducer(state, applyChatEvent(removeEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.thread.messages).toHaveLength(1); + expect(runtime.thread.messages[0].content).toBe("Hello"); + }); + + test("should handle removing non-existent message gracefully", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello", message_id: "msg-1" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const removeEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "message_removed", + message_id: "non-existent-id", + }; + + state = chatReducer(state, applyChatEvent(removeEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.thread.messages).toHaveLength(1); + }); + }); + + describe("applyChatEvent - messages_truncated", () => { + test("should truncate messages from index", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "First", message_id: "msg-1" }, + { role: "assistant", content: "Response 1", message_id: "msg-2" }, + { role: "user", content: "Second", message_id: "msg-3" }, + { role: "assistant", content: "Response 2", message_id: "msg-4" }, + ], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const truncateEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "messages_truncated", + from_index: 2, + }; + + state = chatReducer(state, applyChatEvent(truncateEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.thread.messages).toHaveLength(2); + expect(runtime.thread.messages[0].content).toBe("First"); + expect(runtime.thread.messages[1].content).toBe("Response 1"); + }); + + test("should handle truncate from index 0", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "Hello", message_id: "msg-1" }, + { role: "assistant", content: "Hi", message_id: "msg-2" }, + ], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const truncateEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "messages_truncated", + from_index: 0, + 
}; + + state = chatReducer(state, applyChatEvent(truncateEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.thread.messages).toHaveLength(0); + }); + + test("should clamp negative truncate index to 0", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "A", message_id: "m1" }, + { role: "assistant", content: "B", message_id: "m2" }, + ], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "messages_truncated", + from_index: -1, + }), + ); + + const runtime = state.threads[chatId]!; + expect(runtime.thread.messages).toHaveLength(0); + }); + }); + + describe("applyChatEvent - thread_updated", () => { + test("should update thread params", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-3.5", + mode: "NO_TOOLS", + tool_use: "quick", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const updateEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "2", + type: "thread_updated", + model: "gpt-4", + mode: "agent", + boost_reasoning: true, + }; + + state = chatReducer(state, applyChatEvent(updateEvent)); + const runtime = state.threads[chatId]!; + + expect(runtime.thread.model).toBe("gpt-4"); + expect(runtime.thread.mode).toBe("agent"); + expect(runtime.thread.boost_reasoning).toBe(true); + }); + }); + + describe("Event sequence handling", () => { + test("should ignore events for unknown chat_id", () => { + const event: ChatEventEnvelope = { + chat_id: "unknown-chat-id", + seq: "1", + type: "stream_started", + message_id: "msg-1", + }; + + const result = chatReducer(initialState, applyChatEvent(event)); + + expect(result.threads["unknown-chat-id"]).toBeUndefined(); + }); + + test("should process events in sequence", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hi" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + const events: ChatEventEnvelope[] = [ + { + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-1", + }, + { + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "append_content", text: "Hello!" 
}], + }, + { + chat_id: chatId, + seq: "4", + type: "stream_finished", + message_id: "msg-1", + finish_reason: "stop", + }, + ]; + + for (const event of events) { + state = chatReducer(state, applyChatEvent(event)); + } + + const runtime = state.threads[chatId]!; + expect(runtime.streaming).toBe(false); + expect(runtime.waiting_for_response).toBe(false); + expect(runtime.thread.messages).toHaveLength(2); + expect(runtime.thread.messages[1].content).toBe("Hello!"); + }); + }); + + describe("applyChatEvent - ack and ide_tool_required seq guards", () => { + test("ack should advance last_applied_seq", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "5", + type: "ack", + client_request_id: "req-1", + accepted: true, + result: null, + }), + ); + + const runtime = state.threads[chatId]!; + expect(runtime.last_applied_seq).toBe("5"); + }); + + test("ack should reject replayed seq", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + // Advance to seq 5 via ack + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "5", + type: "ack", + client_request_id: "req-1", + accepted: true, + result: null, + }), + ); + + // Replay old ack at seq 3 - should be ignored + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "ack", + client_request_id: "req-2", + accepted: true, + result: null, + }), + ); + + expect(state.threads[chatId]!.last_applied_seq).toBe("5"); + }); + + test("ack then old message_added should be rejected", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello", message_id: "m1" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + // ack advances watermark to seq 5 + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "5", + type: "ack", + client_request_id: "req-1", + accepted: true, + result: null, + }), + ); + + // Old message_added at seq 4 should 
be rejected + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "4", + type: "message_added", + message: { + role: "assistant", + content: "Should be rejected", + message_id: "m-stale", + }, + index: 1, + }), + ); + + expect(state.threads[chatId]!.thread.messages).toHaveLength(1); + }); + + test("ide_tool_required should advance last_applied_seq with guard", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + // Advance to seq 7 + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "7", + type: "ide_tool_required", + tool_call_id: "tc-1", + tool_name: "shell", + args: "{}", + }), + ); + expect(state.threads[chatId]!.last_applied_seq).toBe("7"); + + // Replay old seq 3 should be ignored + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "ide_tool_required", + tool_call_id: "tc-2", + tool_name: "shell", + args: "{}", + }), + ); + expect(state.threads[chatId]!.last_applied_seq).toBe("7"); + }); + }); + + describe("message_index_by_id - prototype pollution protection", () => { + test("should safely handle __proto__ as message_id", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "idle", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [ + { role: "user", content: "Test", message_id: "__proto__" }, + { role: "assistant", content: "Reply", message_id: "constructor" }, + ], + }; + + const state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + const runtime = state.threads[chatId]!; + + // Messages should be stored correctly + expect(runtime.thread.messages).toHaveLength(2); + expect(runtime.thread.messages[0].content).toBe("Test"); + expect(runtime.thread.messages[1].content).toBe("Reply"); + + // Index should work without polluting Object prototype + expect(runtime.message_index_by_id).toBeDefined(); + const emptyObj = {}; + // Verify Object.prototype was not polluted + expect(Object.getPrototypeOf(emptyObj)).toBe(Object.prototype); + expect(emptyObj.constructor).toBe(Object); + + // Can update message with __proto__ id without crash + const updateState = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "message_updated", + message_id: "__proto__", + message: { + role: "user", + content: "Updated", + message_id: "__proto__", + }, + }), + ); + expect(updateState.threads[chatId]!.thread.messages[0].content).toBe( + "Updated", + ); + }); + }); + + describe("isStreaming flag transitions", () => { + test("stream_finished should clear streaming flag", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: 
"Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + expect(state.threads[chatId]!.streaming).toBe(true); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-1", + }), + ); + expect(state.threads[chatId]!.streaming).toBe(true); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "stream_delta", + message_id: "msg-1", + ops: [{ op: "append_content", text: "content" }], + }), + ); + expect(state.threads[chatId]!.streaming).toBe(true); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "4", + type: "stream_finished", + message_id: "msg-1", + finish_reason: "stop", + }), + ); + expect(state.threads[chatId]!.streaming).toBe(false); + expect(state.threads[chatId]!.waiting_for_response).toBe(false); + }); + + test("stream_finished with tool_calls should keep waiting_for_response", () => { + const snapshotEvent: ChatEventEnvelope = { + chat_id: chatId, + seq: "1", + type: "snapshot", + thread: { + id: chatId, + title: "Test", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + boost_reasoning: false, + context_tokens_cap: null, + include_project_info: true, + checkpoints_enabled: true, + is_title_generated: false, + }, + runtime: { + state: "generating", + paused: false, + error: null, + queue_size: 0, + pause_reasons: [], + queued_items: [], + }, + messages: [{ role: "user", content: "Hello" }], + }; + + let state = chatReducer(initialState, applyChatEvent(snapshotEvent)); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "2", + type: "stream_started", + message_id: "msg-1", + }), + ); + + state = chatReducer( + state, + applyChatEvent({ + chat_id: chatId, + seq: "3", + type: "stream_finished", + message_id: "msg-1", + finish_reason: "tool_calls", + }), + ); + + const runtime = state.threads[chatId]!; + expect(runtime.streaming).toBe(false); + // tool_calls finish reason means tools are about to execute + expect(runtime.session_state).toBe("executing_tools"); + }); }); }); diff --git a/refact-agent/gui/src/features/Chat/Thread/reducer.ts b/refact-agent/gui/src/features/Chat/Thread/reducer.ts index 32987febc..b3cf25733 100644 --- a/refact-agent/gui/src/features/Chat/Thread/reducer.ts +++ b/refact-agent/gui/src/features/Chat/Thread/reducer.ts @@ -2,24 +2,25 @@ import { createReducer, Draft } from "@reduxjs/toolkit"; import { Chat, ChatThread, + ChatThreadRuntime, IntegrationMeta, ToolUse, - LspChatMode, - chatModeToLspMode, - isLspChatMode, + ChatModeId, + isToolUse, + normalizeLegacyMode, } from "./types"; import { v4 as uuidv4 } from "uuid"; -import { chatResponse, chatAskedQuestion } from "."; +import { getLastThreadParams } from "../../../utils/threadStorage"; import { setToolUse, + setThreadMode, enableSend, clearChatError, setChatModel, setSystemPrompt, newChatAction, + createChatWithId, backUpMessages, - chatError, - doneStreaming, removeChatFromCache, restoreChat, setPreventSend, @@ -30,7 +31,8 @@ import { setIntegrationData, setIsWaitingForResponse, setMaxNewTokens, - setAutomaticPatch, + 
setAutoApproveEditingTools, + setAutoApproveDangerousCommands, setLastUserMessageId, setEnabledCheckpoints, setBoostReasoning, @@ -40,38 +42,55 @@ import { upsertToolCall, setIncreaseMaxTokens, setAreFollowUpsEnabled, - setIsTitleGenerationEnabled, setIncludeProjectInfo, setContextTokensCap, - setUseCompression, - enqueueUserMessage, - dequeueUserMessage, - clearQueuedMessages, + setReasoningEffort, + setThinkingBudget, + setTemperature, + setFrequencyPenalty, + setMaxTokens, + setParallelToolCalls, + closeThread, + switchToThread, + updateOpenThread, + updateChatRuntimeFromSessionState, + setThreadPauseReasons, + clearThreadPauseReasons, + setThreadConfirmationStatus, + addThreadImage, + removeThreadImageByIndex, + resetThreadImages, + addThreadTextFile, + removeThreadTextFileByIndex, + resetThreadTextFiles, + applyChatEvent, + requestSseRefresh, + clearSseRefreshRequest, + setTaskWidgetExpanded, } from "./actions"; -import { formatChatResponse, postProcessMessagesAfterStreaming } from "./utils"; +import { applyDeltaOps } from "../../../services/refact/chatSubscription"; import { + AssistantMessage, ChatMessages, commandsApi, isAssistantMessage, isDiffMessage, - isMultiModalToolResult, isToolCallMessage, isToolMessage, - isUserMessage, - isUserResponse, ToolCall, + ToolConfirmationPauseReason, ToolMessage, - UserMessage, validateToolCall, + DiffChunk, } from "../../../services/refact"; import { capsApi } from "../../../services/refact"; const createChatThread = ( tool_use: ToolUse, integration?: IntegrationMeta | null, - mode?: LspChatMode, + mode?: ChatModeId, ): ChatThread => { - const chat: ChatThread = { + return { id: uuidv4(), messages: [], title: "", @@ -80,100 +99,189 @@ const createChatThread = ( tool_use, integration, mode, - new_chat_suggested: { - wasSuggested: false, - }, + new_chat_suggested: { wasSuggested: false }, boost_reasoning: false, - automatic_patch: false, increase_max_tokens: false, include_project_info: true, context_tokens_cap: undefined, }; - return chat; }; -type createInitialStateArgs = { - tool_use?: ToolUse; - integration?: IntegrationMeta | null; - maybeMode?: LspChatMode; +const createThreadRuntime = ( + tool_use: ToolUse, + integration?: IntegrationMeta | null, + mode?: ChatModeId, +): ChatThreadRuntime => { + return { + thread: createChatThread(tool_use, integration, mode), + streaming: false, + waiting_for_response: false, + prevent_send: false, + error: null, + queued_items: [], + send_immediately: false, + attached_images: [], + attached_text_files: [], + confirmation: { + pause: false, + pause_reasons: [], + status: { + wasInteracted: false, + confirmationStatus: true, + }, + }, + snapshot_received: false, + task_widget_expanded: false, + }; }; const getThreadMode = ({ - tool_use, integration, - maybeMode, -}: createInitialStateArgs) => { - if (integration) { - return "CONFIGURE"; - } - if (maybeMode) { - return maybeMode === "CONFIGURE" ? 
"AGENT" : maybeMode; - } - - return chatModeToLspMode({ toolUse: tool_use }); +}: { + integration?: IntegrationMeta | null; +}) => { + if (integration) return "configurator"; + return "agent"; }; -const createInitialState = ({ - tool_use = "agent", - integration, - maybeMode, -}: createInitialStateArgs): Chat => { - const mode = getThreadMode({ tool_use, integration, maybeMode }); +const normalizeMessage = (msg: ChatMessages[number]): ChatMessages[number] => { + if (msg.role === "diff" && typeof msg.content === "string") { + try { + const parsed: unknown = JSON.parse(msg.content); + if (Array.isArray(parsed)) { + return { + ...msg, + content: parsed as DiffChunk[], + } as ChatMessages[number]; + } + } catch { + // ignore + } + } + return msg; +}; +const createInitialState = (): Chat => { return { - streaming: false, - thread: createChatThread(tool_use, integration, mode), - error: null, - prevent_send: false, - waiting_for_response: false, - cache: {}, + current_thread_id: "", + open_thread_ids: [], + threads: {}, system_prompt: {}, - tool_use, + tool_use: "agent", checkpoints_enabled: true, - send_immediately: false, - queued_messages: [], + follow_ups_enabled: undefined, + sse_refresh_requested: null, + stream_version: 0, }; }; -const initialState = createInitialState({}); +const initialState = createInitialState(); + +const getRuntime = ( + state: Draft<Chat>, + chatId: string, +): Draft<ChatThreadRuntime> | null => { + return state.threads[chatId] ?? null; +}; + +const getCurrentRuntime = ( + state: Draft<Chat>, +): Draft<ChatThreadRuntime> | null => { + return getRuntime(state, state.current_thread_id); +}; + +function rebuildMessageIndexById( + messages: ChatMessages, +): Record<string, number> { + const index: Record<string, number> = Object.create(null) as Record< + string, + number + >; + for (let i = 0; i < messages.length; i++) { + const msg = messages[i]; + if ("message_id" in msg && msg.message_id) { + index[msg.message_id] = i; + } + } + return index; +} + +function findMessageIndexById( + rt: Draft<ChatThreadRuntime>, + messageId: string, +): number { + const indexed = rt.message_index_by_id?.[messageId]; + if (indexed != null) { + const maybeMsg = rt.thread.messages[indexed]; + if ("message_id" in maybeMsg && maybeMsg.message_id === messageId) { + return indexed; + } + } + return rt.thread.messages.findIndex( + (m) => "message_id" in m && m.message_id === messageId, + ); +} + +function parseEventSeq(seq: string): bigint | null { + if (!/^\d+$/.test(seq)) return null; + try { + return BigInt(seq); + } catch { + return null; + } +} export const chatReducer = createReducer(initialState, (builder) => { builder.addCase(setToolUse, (state, action) => { - state.thread.tool_use = action.payload; state.tool_use = action.payload; - state.thread.mode = chatModeToLspMode({ toolUse: action.payload }); + }); + + builder.addCase(setThreadMode, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt && rt.thread.messages.length === 0) { + rt.thread.mode = action.payload.mode; + const defaults = action.payload.threadDefaults; + if (defaults) { + if (defaults.include_project_info !== undefined) { + rt.thread.include_project_info = defaults.include_project_info; + } + if (defaults.checkpoints_enabled !== undefined) { + rt.thread.checkpoints_enabled = defaults.checkpoints_enabled; + } + if (defaults.auto_approve_editing_tools !== undefined) { + rt.thread.auto_approve_editing_tools = + defaults.auto_approve_editing_tools; + } + if 
(defaults.auto_approve_dangerous_commands !== undefined) { + rt.thread.auto_approve_dangerous_commands = + defaults.auto_approve_dangerous_commands; + } + } + } }); builder.addCase(setPreventSend, (state, action) => { - if (state.thread.id !== action.payload.id) return state; - state.prevent_send = true; + const rt = getRuntime(state, action.payload.id); + if (rt) rt.prevent_send = true; }); builder.addCase(enableSend, (state, action) => { - if (state.thread.id !== action.payload.id) return state; - state.prevent_send = false; + const rt = getRuntime(state, action.payload.id); + if (rt) rt.prevent_send = false; }); builder.addCase(setAreFollowUpsEnabled, (state, action) => { state.follow_ups_enabled = action.payload; }); - builder.addCase(setIsTitleGenerationEnabled, (state, action) => { - state.title_generation_enabled = action.payload; - }); - - builder.addCase(setUseCompression, (state, action) => { - state.use_compression = action.payload; - }); - builder.addCase(clearChatError, (state, action) => { - if (state.thread.id !== action.payload.id) return state; - state.error = null; + const rt = getRuntime(state, action.payload.id); + if (rt) rt.error = null; }); builder.addCase(setChatModel, (state, action) => { - state.thread.model = action.payload; - state.thread.model = action.payload; + const rt = getCurrentRuntime(state); + if (rt) rt.thread.model = action.payload; }); builder.addCase(setSystemPrompt, (state, action) => { @@ -181,106 +289,141 @@ export const chatReducer = createReducer(initialState, (builder) => { }); builder.addCase(newChatAction, (state, action) => { - const next = createInitialState({ - tool_use: state.tool_use, - maybeMode: state.thread.mode, - }); - next.cache = { ...state.cache }; - if (state.streaming || state.waiting_for_response) { - next.cache[state.thread.id] = { ...state.thread, read: false }; - } - next.thread.model = state.thread.model; - next.system_prompt = state.system_prompt; - next.checkpoints_enabled = state.checkpoints_enabled; - next.follow_ups_enabled = state.follow_ups_enabled; - next.title_generation_enabled = state.title_generation_enabled; - next.use_compression = state.use_compression; - next.thread.boost_reasoning = state.thread.boost_reasoning; - next.queued_messages = []; - // next.thread.automatic_patch = state.thread.automatic_patch; - if (action.payload?.messages) { - next.thread.messages = action.payload.messages; - } - return next; - }); - - builder.addCase(chatResponse, (state, action) => { - if ( - action.payload.id !== state.thread.id && - !(action.payload.id in state.cache) - ) { - return state; + const currentRt = getCurrentRuntime(state); + const mode = getThreadMode({}); + const lastParams = getLastThreadParams(mode); + const newRuntime = createThreadRuntime(state.tool_use, null, mode); + + newRuntime.thread.model = lastParams.model ?? currentRt?.thread.model ?? ""; + newRuntime.thread.boost_reasoning = + lastParams.boost_reasoning ?? currentRt?.thread.boost_reasoning ?? false; + newRuntime.thread.reasoning_effort = lastParams.reasoning_effort; + newRuntime.thread.thinking_budget = lastParams.thinking_budget; + newRuntime.thread.temperature = lastParams.temperature; + newRuntime.thread.max_tokens = lastParams.max_tokens; + newRuntime.thread.increase_max_tokens = + lastParams.increase_max_tokens ?? + currentRt?.thread.increase_max_tokens ?? + false; + newRuntime.thread.include_project_info = + lastParams.include_project_info ?? + currentRt?.thread.include_project_info ?? 
+ true; + newRuntime.thread.context_tokens_cap = + lastParams.context_tokens_cap ?? currentRt?.thread.context_tokens_cap; + + if (action.payload?.title) { + newRuntime.thread.title = action.payload.title; } - if (action.payload.id in state.cache) { - const thread = state.cache[action.payload.id]; - // TODO: this might not be needed any more, because we can mutate the last message. - const messages = formatChatResponse(thread.messages, action.payload); - thread.messages = messages; - return state; - } + const newId = newRuntime.thread.id; + state.threads[newId] = newRuntime; + state.open_thread_ids.push(newId); + state.current_thread_id = newId; + }); - const messages = formatChatResponse(state.thread.messages, action.payload); + builder.addCase(createChatWithId, (state, action) => { + const { id, title, isTaskChat, mode, taskMeta, model } = action.payload; + const existingRt = state.threads[id]; - state.thread.messages = messages; - state.streaming = true; - state.waiting_for_response = false; + if (existingRt) { + if (isTaskChat) { + existingRt.thread.is_task_chat = true; + state.open_thread_ids = state.open_thread_ids.filter( + (tid) => tid !== id, + ); + } + if (title && !existingRt.thread.title) { + existingRt.thread.title = title; + } + if (mode) { + existingRt.thread.mode = normalizeLegacyMode(mode); + } + if (taskMeta) { + existingRt.thread.task_meta = taskMeta; + } + if (model && !existingRt.thread.model) { + existingRt.thread.model = model; + } + state.current_thread_id = id; + return; + } - if ( - isUserResponse(action.payload) && - action.payload.compression_strength && - action.payload.compression_strength !== "absent" - ) { - state.thread.new_chat_suggested = { - wasRejectedByUser: false, - wasSuggested: true, - }; + const currentRt = getCurrentRuntime(state); + const effectiveMode = mode ?? getThreadMode({}); + const lastParams = getLastThreadParams(effectiveMode); + const newRuntime = createThreadRuntime("agent", null, effectiveMode); + + newRuntime.thread.id = id; + newRuntime.thread.model = + model ?? lastParams.model ?? currentRt?.thread.model ?? ""; + newRuntime.thread.boost_reasoning = + lastParams.boost_reasoning ?? currentRt?.thread.boost_reasoning ?? false; + newRuntime.thread.reasoning_effort = lastParams.reasoning_effort; + newRuntime.thread.thinking_budget = lastParams.thinking_budget; + newRuntime.thread.temperature = lastParams.temperature; + newRuntime.thread.max_tokens = lastParams.max_tokens; + newRuntime.thread.increase_max_tokens = + lastParams.increase_max_tokens ?? + currentRt?.thread.increase_max_tokens ?? + false; + newRuntime.thread.include_project_info = + lastParams.include_project_info ?? + currentRt?.thread.include_project_info ?? + true; + newRuntime.thread.context_tokens_cap = + lastParams.context_tokens_cap ?? currentRt?.thread.context_tokens_cap; + + if (title) { + newRuntime.thread.title = title; + } + if (isTaskChat) { + newRuntime.thread.is_task_chat = true; } + if (taskMeta) { + newRuntime.thread.task_meta = taskMeta; + } + + state.threads[id] = newRuntime; + if (!isTaskChat) { + state.open_thread_ids.push(id); + } + state.current_thread_id = id; }); builder.addCase(backUpMessages, (state, action) => { - // TODO: should it also save to history? 
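The rebuildMessageIndexById helper above builds its message_id to index map with Object.create(null); the "__proto__" / "constructor" test earlier in this patch exercises exactly that. A minimal standalone sketch, separate from the patch itself, of why a null-prototype object is used here; the variable names are illustrative only.

```typescript
// Standalone illustration, separate from the patch: why the message index
// uses Object.create(null) rather than a plain object literal.
const plain: Record<string, number> = {};
plain["__proto__"] = 0;
// On a plain object, "__proto__" hits the inherited accessor on
// Object.prototype instead of being stored, so the key silently vanishes.
console.log(Object.keys(plain).length); // 0

// A null-prototype object has no inherited accessors, so any message_id,
// including "__proto__" and "constructor", is stored as an ordinary key.
const index: Record<string, number> = Object.create(null) as Record<
  string,
  number
>;
index["__proto__"] = 0;
index["constructor"] = 1;
console.log(Object.keys(index)); // ["__proto__", "constructor"]
console.log(({}).constructor === Object); // true, the global prototype is untouched
```

This mirrors the assertions made in the prototype pollution test above.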
- state.error = null; - // state.previous_message_length = state.thread.messages.length; - state.thread.messages = action.payload.messages; - }); - - builder.addCase(chatError, (state, action) => { - state.streaming = false; - state.prevent_send = true; - state.waiting_for_response = false; - state.error = action.payload.message; - }); - - builder.addCase(doneStreaming, (state, action) => { - if (state.thread.id !== action.payload.id) return state; - state.streaming = false; - state.waiting_for_response = false; - state.thread.read = true; - state.thread.messages = postProcessMessagesAfterStreaming( - state.thread.messages, - ); + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.error = null; + rt.thread.messages = action.payload.messages; + } + }); + + builder.addCase(setAutoApproveEditingTools, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.auto_approve_editing_tools = action.payload.value; }); - builder.addCase(setAutomaticPatch, (state, action) => { - if (state.thread.id !== action.payload.chatId) return state; - state.thread.automatic_patch = action.payload.value; + builder.addCase(setAutoApproveDangerousCommands, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.auto_approve_dangerous_commands = action.payload.value; }); builder.addCase(setIsNewChatSuggested, (state, action) => { - if (state.thread.id !== action.payload.chatId) return state; - state.thread.new_chat_suggested = { - wasSuggested: action.payload.value, - }; + const rt = getRuntime(state, action.payload.chatId); + if (rt) + rt.thread.new_chat_suggested = { wasSuggested: action.payload.value }; }); builder.addCase(setIsNewChatSuggestionRejected, (state, action) => { - if (state.thread.id !== action.payload.chatId) return state; - state.prevent_send = false; - state.thread.new_chat_suggested = { - ...state.thread.new_chat_suggested, - wasRejectedByUser: action.payload.value, - }; + const rt = getRuntime(state, action.payload.chatId); + if (rt) { + rt.prevent_send = false; + rt.thread.new_chat_suggested = { + ...rt.thread.new_chat_suggested, + wasRejectedByUser: action.payload.value, + }; + } }); builder.addCase(setEnabledCheckpoints, (state, action) => { @@ -288,194 +431,338 @@ export const chatReducer = createReducer(initialState, (builder) => { }); builder.addCase(setBoostReasoning, (state, action) => { - if (state.thread.id !== action.payload.chatId) return state; - state.thread.boost_reasoning = action.payload.value; + const rt = getRuntime(state, action.payload.chatId); + if (rt) { + rt.thread.boost_reasoning = action.payload.value; + // Reasoning implies temperature must be unset (treated as "None"). + if (action.payload.value) { + rt.thread.temperature = undefined; + } + } }); - builder.addCase(setLastUserMessageId, (state, action) => { - if (state.thread.id !== action.payload.chatId) return state; - state.thread.last_user_message_id = action.payload.messageId; + builder.addCase(setReasoningEffort, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) { + rt.thread.reasoning_effort = action.payload.value ?? undefined; + // Any explicit reasoning effort implies reasoning mode: unset temperature. + if (action.payload.value != null) { + rt.thread.temperature = undefined; + } + } + }); + + builder.addCase(setThinkingBudget, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) { + rt.thread.thinking_budget = action.payload.value ?? 
undefined; + // Any explicit thinking budget implies reasoning mode: unset temperature. + if (action.payload.value != null) { + rt.thread.temperature = undefined; + } + } + }); + + builder.addCase(setTemperature, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.temperature = action.payload.value ?? undefined; + }); + + builder.addCase(setFrequencyPenalty, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.frequency_penalty = action.payload.value ?? undefined; + }); + + builder.addCase(setMaxTokens, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.max_tokens = action.payload.value ?? undefined; + }); + + builder.addCase(setParallelToolCalls, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.parallel_tool_calls = action.payload.value ?? undefined; }); - builder.addCase(chatAskedQuestion, (state, action) => { - if (state.thread.id !== action.payload.id) return state; - state.send_immediately = false; - state.waiting_for_response = true; - state.thread.read = false; - state.prevent_send = false; + builder.addCase(setTaskWidgetExpanded, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) rt.task_widget_expanded = action.payload.expanded; + }); + + builder.addCase(setLastUserMessageId, (state, action) => { + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.last_user_message_id = action.payload.messageId; }); builder.addCase(removeChatFromCache, (state, action) => { - if (!(action.payload.id in state.cache)) return state; + const id = action.payload.id; + const rt = state.threads[id]; + if (rt && !rt.streaming && !rt.confirmation.pause) { + const { [id]: _, ...rest } = state.threads; + state.threads = rest; + state.open_thread_ids = state.open_thread_ids.filter((tid) => tid !== id); + } + }); - const cache = Object.entries(state.cache).reduce< - Record<string, ChatThread> - >((acc, cur) => { - if (cur[0] === action.payload.id) return acc; - return { ...acc, [cur[0]]: cur[1] }; - }, {}); - state.cache = cache; + builder.addCase(closeThread, (state, action) => { + const id = action.payload.id; + const force = action.payload.force ?? false; + state.open_thread_ids = state.open_thread_ids.filter((tid) => tid !== id); + const rt = state.threads[id]; + if ( + rt && + (force || + (!rt.streaming && !rt.waiting_for_response && !rt.confirmation.pause)) + ) { + const { [id]: _, ...rest } = state.threads; + state.threads = rest; + } + if (state.current_thread_id === id) { + state.current_thread_id = state.open_thread_ids[0] ?? ""; + } }); builder.addCase(restoreChat, (state, action) => { - if (state.thread.id === action.payload.id) return state; - const mostUptoDateThread = - action.payload.id in state.cache - ? 
{ ...state.cache[action.payload.id] } - : { ...action.payload, read: true }; - - state.error = null; - state.waiting_for_response = false; - - if (state.streaming) { - state.cache[state.thread.id] = { ...state.thread, read: false }; - } - if (action.payload.id in state.cache) { - const { [action.payload.id]: _, ...rest } = state.cache; - state.cache = rest; - state.streaming = true; - } else { - state.streaming = false; - } - state.prevent_send = true; - state.thread = { - new_chat_suggested: { wasSuggested: false }, - ...mostUptoDateThread, - }; - state.thread.messages = postProcessMessagesAfterStreaming( - state.thread.messages, - ); - state.thread.tool_use = state.thread.tool_use ?? state.tool_use; - if (action.payload.mode && !isLspChatMode(action.payload.mode)) { - state.thread.mode = "AGENT"; + const existingRt = getRuntime(state, action.payload.id); + if (existingRt) { + if (!state.open_thread_ids.includes(action.payload.id)) { + state.open_thread_ids.push(action.payload.id); + } + state.current_thread_id = action.payload.id; + // Don't reset snapshot_received - thread was already hydrated + return; } - const lastUserMessage = action.payload.messages.reduce<UserMessage | null>( - (acc, cur) => { - if (isUserMessage(cur)) return cur; - return acc; + const mode = normalizeLegacyMode(action.payload.mode); + const newRuntime: ChatThreadRuntime = { + thread: { + id: action.payload.id, + messages: [], + model: action.payload.model, + title: action.payload.title, + tool_use: action.payload.tool_use ?? state.tool_use, + mode, + new_chat_suggested: { wasSuggested: false }, }, - null, - ); + streaming: false, + waiting_for_response: false, + prevent_send: false, + error: null, + queued_items: [], + send_immediately: false, + attached_images: [], + attached_text_files: [], + confirmation: { + pause: false, + pause_reasons: [], + status: { + wasInteracted: false, + confirmationStatus: true, + }, + }, + snapshot_received: false, + task_widget_expanded: false, + }; - if ( - lastUserMessage?.compression_strength && - lastUserMessage.compression_strength !== "absent" - ) { - state.thread.new_chat_suggested = { - wasRejectedByUser: false, - wasSuggested: true, - }; + state.threads[action.payload.id] = newRuntime; + if (!state.open_thread_ids.includes(action.payload.id)) { + state.open_thread_ids.push(action.payload.id); } + state.current_thread_id = action.payload.id; }); - // New builder to save chat title within the current thread and not only inside of a history thread - builder.addCase(saveTitle, (state, action) => { - if (state.thread.id !== action.payload.id) return state; - state.thread.title = action.payload.title; - state.thread.isTitleGenerated = action.payload.isTitleGenerated; - }); + builder.addCase(switchToThread, (state, action) => { + const { id, openTab } = action.payload; + const existingRt = getRuntime(state, id); - builder.addCase(newIntegrationChat, (state, action) => { - // TODO: find out about tool use - // TODO: should be CONFIGURE ? 
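The newChatAction and createChatWithId cases above seed a fresh thread runtime from getLastThreadParams(mode), imported from utils/threadStorage; that module is not part of this patch. The sketch below is hypothetical: the field names are inferred from the reads in the reducer, while the localStorage backing and key format are assumptions, not the actual implementation.

```typescript
// Hypothetical sketch of utils/threadStorage, inferred from how the reducer
// reads getLastThreadParams(); the storage key and mechanism are assumptions.
export type LastThreadParams = Partial<{
  model: string;
  boost_reasoning: boolean;
  reasoning_effort: string;
  thinking_budget: number;
  temperature: number;
  max_tokens: number;
  increase_max_tokens: boolean;
  include_project_info: boolean;
  context_tokens_cap: number;
}>;

export function getLastThreadParams(mode: string): LastThreadParams {
  try {
    const raw = localStorage.getItem(`refact.last_thread_params.${mode}`);
    return raw ? (JSON.parse(raw) as LastThreadParams) : {};
  } catch {
    // Storage unavailable or the stored value is corrupted: return empty
    // params so the reducer's ?? chains fall back to existing defaults.
    return {};
  }
}
```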
- const next = createInitialState({ - tool_use: "agent", - integration: action.payload.integration, - maybeMode: "CONFIGURE", - }); - next.thread.last_user_message_id = action.payload.request_attempt_id; - next.thread.integration = action.payload.integration; - next.thread.messages = action.payload.messages; + if (!existingRt) { + // eslint-disable-next-line no-console + console.warn(`[switchToThread] No runtime for ${id}`); + } - next.thread.model = state.thread.model; - next.system_prompt = state.system_prompt; - next.cache = { ...state.cache }; - if (state.streaming) { - next.cache[state.thread.id] = { ...state.thread, read: false }; + if (existingRt) { + const shouldOpenTab = + openTab !== false && !existingRt.thread.is_task_chat; + if (shouldOpenTab && !state.open_thread_ids.includes(id)) { + state.open_thread_ids.push(id); + } + state.current_thread_id = id; } - return next; }); - builder.addCase(setSendImmediately, (state, action) => { - state.send_immediately = action.payload; + builder.addCase(updateOpenThread, (state, action) => { + const existingRt = getRuntime(state, action.payload.id); + if (!existingRt) return; + + const incomingTitle = action.payload.thread.title; + const incomingGenerated = action.payload.thread.isTitleGenerated; + + if (incomingTitle) { + if (incomingGenerated === true) { + if (!existingRt.thread.isTitleGenerated) { + existingRt.thread.title = incomingTitle; + existingRt.thread.isTitleGenerated = true; + } + } else if (incomingGenerated === false) { + existingRt.thread.title = incomingTitle; + existingRt.thread.isTitleGenerated = false; + } + } + + const isCurrentThread = action.payload.id === state.current_thread_id; + if ( + !existingRt.streaming && + !existingRt.waiting_for_response && + !existingRt.error && + !isCurrentThread + ) { + const { + title: _title, + isTitleGenerated: _isTitleGenerated, + messages: _messages, + ...otherFields + } = action.payload.thread; + existingRt.thread = { + ...existingRt.thread, + ...otherFields, + }; + } }); - builder.addCase(enqueueUserMessage, (state, action) => { - const { priority, ...rest } = action.payload; - const messagePayload = { ...rest, priority }; - if (priority) { - // Insert at front for "send next" (next available turn) - // Find the position after existing priority messages (stable FIFO among priority) - const insertAt = state.queued_messages.findIndex((m) => !m.priority); - if (insertAt === -1) { - state.queued_messages.push(messagePayload); - } else { - state.queued_messages.splice(insertAt, 0, messagePayload); + builder.addCase(updateChatRuntimeFromSessionState, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (!rt) return; + + const sessionState = action.payload.session_state; + + // When a thread has an active chat SSE subscription (snapshot_received), + // the chat SSE channel (applyChatEvent → runtime_updated) is the + // authoritative source for runtime state. The sidebar SSE can deliver + // stale trajectory events (e.g. "generating") that arrive AFTER the chat + // SSE has already moved to "waiting_user_input" or "completed", causing + // boolean flags to be incorrectly overwritten. Skip boolean/flag updates + // for threads with an active chat SSE; only update session_state for + // display purposes (tabs, StatusDot). + if (rt.snapshot_received) { + // Keep the last known session_state for display (tabs/StatusDot), but do + // not overwrite streaming/waiting flags. 
+ rt.session_state = sessionState; + if (sessionState === "error") { + rt.error = action.payload.error ?? "An error occurred"; + } + return; + } + + rt.session_state = sessionState; + rt.streaming = sessionState === "generating"; + rt.waiting_for_response = + sessionState === "generating" || + sessionState === "executing_tools" || + sessionState === "waiting_ide"; + rt.prevent_send = false; + + if (sessionState === "paused") { + rt.confirmation.pause = true; + if (rt.confirmation.pause_reasons.length === 0) { + state.sse_refresh_requested = action.payload.id; } - } else { - state.queued_messages.push(messagePayload); + } else if ( + sessionState === "idle" || + sessionState === "error" || + sessionState === "completed" || + sessionState === "waiting_user_input" + ) { + rt.confirmation.pause = false; + rt.confirmation.pause_reasons = []; + } + + if (sessionState === "error") { + rt.error = action.payload.error ?? "An error occurred"; + } else if ( + sessionState === "idle" || + sessionState === "completed" || + sessionState === "waiting_user_input" + ) { + rt.error = null; + } + }); + + builder.addCase(saveTitle, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.thread.title = action.payload.title; + rt.thread.isTitleGenerated = action.payload.isTitleGenerated; } }); - builder.addCase(dequeueUserMessage, (state, action) => { - state.queued_messages = state.queued_messages.filter( - (q) => q.id !== action.payload.queuedId, + builder.addCase(newIntegrationChat, (state, action) => { + const currentRt = getCurrentRuntime(state); + const newRuntime = createThreadRuntime( + "agent", + action.payload.integration, + "configurator", ); + newRuntime.thread.last_user_message_id = action.payload.request_attempt_id; + newRuntime.thread.messages = action.payload.messages; + if (currentRt) { + newRuntime.thread.model = currentRt.thread.model; + } + + const newId = newRuntime.thread.id; + state.threads[newId] = newRuntime; + state.open_thread_ids.push(newId); + state.current_thread_id = newId; }); - builder.addCase(clearQueuedMessages, (state) => { - state.queued_messages = []; + builder.addCase(setSendImmediately, (state, action) => { + const rt = getCurrentRuntime(state); + if (rt) rt.send_immediately = action.payload; }); builder.addCase(setChatMode, (state, action) => { - state.thread.mode = action.payload; + const rt = getCurrentRuntime(state); + if (rt) rt.thread.mode = action.payload; }); builder.addCase(setIntegrationData, (state, action) => { - state.thread.integration = action.payload; + const rt = getCurrentRuntime(state); + if (rt) rt.thread.integration = action.payload; }); builder.addCase(setIsWaitingForResponse, (state, action) => { - state.waiting_for_response = action.payload; + const rt = getRuntime(state, action.payload.id); + if (rt) rt.waiting_for_response = action.payload.value; }); - // TBD: should be safe to remove? 
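A note on the sequence guard that the applyChatEvent handler further down applies to every event: seq values arrive as strings, and parseEventSeq (defined above) converts them to BigInt before comparing against last_applied_seq. A minimal sketch, separate from the patch, of why that comparison has to be numeric rather than lexicographic; isStale below is an illustrative name, not a function from this codebase.

```typescript
// Standalone illustration: comparing event sequence numbers.
// The helper name is illustrative; the patch performs the same check inline.
function isStale(lastApplied: string | null, incoming: string): boolean {
  if (lastApplied === null) return false;
  // Comparing the raw strings goes wrong past single digits:
  // "10" < "9" is true lexicographically, so a newer event would be dropped.
  // BigInt keeps the comparison numeric and safe for 64-bit counters.
  return BigInt(incoming) <= BigInt(lastApplied);
}

console.log("10" < "9");         // true, string order misranks the values
console.log(isStale("9", "10")); // false, seq 10 is newer and gets applied
console.log(isStale("10", "9")); // true, a replayed seq 9 is ignored
```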
builder.addCase(setMaxNewTokens, (state, action) => { - state.thread.currentMaximumContextTokens = action.payload; - // Also adjust context_tokens_cap if it exceeds the new max - if ( - state.thread.context_tokens_cap === undefined || - state.thread.context_tokens_cap > action.payload - ) { - state.thread.context_tokens_cap = action.payload; + const rt = getCurrentRuntime(state); + if (rt) { + rt.thread.currentMaximumContextTokens = action.payload; + if ( + rt.thread.context_tokens_cap === undefined || + rt.thread.context_tokens_cap > action.payload + ) { + rt.thread.context_tokens_cap = action.payload; + } } }); builder.addCase(fixBrokenToolMessages, (state, action) => { - if (action.payload.id !== state.thread.id) return state; - if (state.thread.messages.length === 0) return state; - const lastMessage = state.thread.messages[state.thread.messages.length - 1]; - if (!isToolCallMessage(lastMessage)) return state; - if (lastMessage.tool_calls.every(validateToolCall)) return state; + const rt = getRuntime(state, action.payload.id); + if (!rt || rt.thread.messages.length === 0) return; + const lastMessage = rt.thread.messages[rt.thread.messages.length - 1]; + if (!isToolCallMessage(lastMessage)) return; + if (lastMessage.tool_calls.every(validateToolCall)) return; const validToolCalls = lastMessage.tool_calls.filter(validateToolCall); - const messages = state.thread.messages.slice(0, -1); + const messages = rt.thread.messages.slice(0, -1); const newMessage = { ...lastMessage, tool_calls: validToolCalls }; - state.thread.messages = [...messages, newMessage]; + rt.thread.messages = [...messages, newMessage]; }); builder.addCase(upsertToolCall, (state, action) => { - // if (action.payload.toolCallId !== state.thread.id && !(action.payload.chatId in state.cache)) return state; - if (action.payload.chatId === state.thread.id) { - maybeAppendToolCallResultFromIdeToMessages( - state.thread.messages, - action.payload.toolCallId, - action.payload.accepted, - ); - } else if (action.payload.chatId in state.cache) { - const thread = state.cache[action.payload.chatId]; + const rt = getRuntime(state, action.payload.chatId); + if (rt) { maybeAppendToolCallResultFromIdeToMessages( - thread.messages, + rt.thread.messages, action.payload.toolCallId, action.payload.accepted, action.payload.replaceOnly, @@ -484,38 +771,729 @@ export const chatReducer = createReducer(initialState, (builder) => { }); builder.addCase(setIncreaseMaxTokens, (state, action) => { - state.thread.increase_max_tokens = action.payload; + const rt = getCurrentRuntime(state); + if (rt) rt.thread.increase_max_tokens = action.payload; }); builder.addCase(setIncludeProjectInfo, (state, action) => { - if (state.thread.id !== action.payload.chatId) return state; - state.thread.include_project_info = action.payload.value; + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.include_project_info = action.payload.value; }); builder.addCase(setContextTokensCap, (state, action) => { - if (state.thread.id !== action.payload.chatId) return state; - state.thread.context_tokens_cap = action.payload.value; + const rt = getRuntime(state, action.payload.chatId); + if (rt) rt.thread.context_tokens_cap = action.payload.value; + }); + + builder.addCase(setThreadPauseReasons, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.confirmation.pause = true; + rt.confirmation.pause_reasons = action.payload.pauseReasons; + rt.confirmation.status.wasInteracted = false; + 
rt.confirmation.status.confirmationStatus = false; + rt.streaming = false; + rt.waiting_for_response = false; + } + }); + + builder.addCase(clearThreadPauseReasons, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.confirmation.pause = false; + rt.confirmation.pause_reasons = []; + } + }); + + builder.addCase(setThreadConfirmationStatus, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.confirmation.status.wasInteracted = action.payload.wasInteracted; + rt.confirmation.status.confirmationStatus = + action.payload.confirmationStatus; + } + }); + + builder.addCase(addThreadImage, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt && rt.attached_images.length < 5) { + rt.attached_images.push(action.payload.image); + } + }); + + builder.addCase(removeThreadImageByIndex, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.attached_images = rt.attached_images.filter( + (_, index) => index !== action.payload.index, + ); + } + }); + + builder.addCase(resetThreadImages, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.attached_images = []; + } + }); + + builder.addCase(addThreadTextFile, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.attached_text_files.push(action.payload.file); + } + }); + + builder.addCase(removeThreadTextFileByIndex, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.attached_text_files = rt.attached_text_files.filter( + (_, index) => index !== action.payload.index, + ); + } + }); + + builder.addCase(resetThreadTextFiles, (state, action) => { + const rt = getRuntime(state, action.payload.id); + if (rt) { + rt.attached_text_files = []; + } + }); + + builder.addCase(applyChatEvent, (state, action) => { + const { chat_id, ...event } = action.payload; + + const rt = getRuntime(state, chat_id); + + switch (event.type) { + case "snapshot": { + const existingRuntime = rt; + const existing = existingRuntime?.thread; + const snapshotMessages = (event.messages as ChatMessages).map( + normalizeMessage, + ); + + const backendModel = event.thread.model.trim(); + const backendToolUse = event.thread.tool_use; + const backendMode = event.thread.mode; + + const snapshotTaskMeta = event.thread.task_meta ?? existing?.task_meta; + const isTaskChat = + Boolean(existing?.is_task_chat) || Boolean(snapshotTaskMeta?.task_id); + + const snapshotTitle = event.thread.title; + const existingTitle = existingRuntime?.thread.title; + const snapshotTitleGenerated = event.thread.is_title_generated; + const existingTitleGenerated = + existingRuntime?.thread.isTitleGenerated === true; + const useSnapshotTitle = + !existingTitle || + existingTitle === "New Chat" || + (snapshotTitleGenerated && !existingTitleGenerated); + + const thread: ChatThread = { + id: event.thread.id, + messages: snapshotMessages, + model: backendModel || (existing?.model ?? ""), + title: useSnapshotTitle ? snapshotTitle : existingTitle, + tool_use: isToolUse(backendToolUse) + ? backendToolUse + : existing?.tool_use && isToolUse(existing.tool_use) + ? existing.tool_use + : "agent", + mode: normalizeLegacyMode(backendMode || existing?.mode), + boost_reasoning: event.thread.boost_reasoning, + context_tokens_cap: + event.thread.context_tokens_cap ?? 
existing?.context_tokens_cap, + include_project_info: event.thread.include_project_info, + checkpoints_enabled: event.thread.checkpoints_enabled, + isTitleGenerated: + existingRuntime?.thread.isTitleGenerated ?? + event.thread.is_title_generated, + auto_approve_editing_tools: + event.thread.auto_approve_editing_tools ?? + existing?.auto_approve_editing_tools ?? + false, + auto_approve_dangerous_commands: + event.thread.auto_approve_dangerous_commands ?? + existing?.auto_approve_dangerous_commands ?? + false, + increase_max_tokens: existing?.increase_max_tokens ?? false, + new_chat_suggested: { wasSuggested: false }, + is_task_chat: isTaskChat, + task_meta: snapshotTaskMeta, + reasoning_effort: + "reasoning_effort" in event.thread + ? (event.thread + .reasoning_effort as ChatThread["reasoning_effort"]) + : existing?.reasoning_effort, + thinking_budget: + "thinking_budget" in event.thread + ? (event.thread.thinking_budget as number | undefined) + : existing?.thinking_budget, + temperature: + "temperature" in event.thread + ? (event.thread.temperature as number | undefined) + : existing?.temperature, + frequency_penalty: + "frequency_penalty" in event.thread + ? (event.thread.frequency_penalty as number | undefined) + : existing?.frequency_penalty, + max_tokens: + "max_tokens" in event.thread + ? (event.thread.max_tokens as number | undefined) + : existing?.max_tokens, + parallel_tool_calls: + "parallel_tool_calls" in event.thread + ? (event.thread.parallel_tool_calls as boolean | undefined) + : existing?.parallel_tool_calls, + }; + + const snapshotState = event.runtime.state as string; + const snapshotStreaming = snapshotState === "generating"; + const snapshotWaiting = + snapshotState === "generating" || + snapshotState === "executing_tools" || + snapshotState === "waiting_ide"; + + const newRt: ChatThreadRuntime = { + thread, + session_state: snapshotState, + streaming: snapshotStreaming, + waiting_for_response: snapshotWaiting, + prevent_send: false, + error: event.runtime.error ?? null, + queued_items: event.runtime + .queued_items as ChatThreadRuntime["queued_items"], + send_immediately: existingRuntime?.send_immediately ?? false, + attached_images: existingRuntime?.attached_images ?? [], + attached_text_files: existingRuntime?.attached_text_files ?? [], + confirmation: { + pause: event.runtime.paused, + pause_reasons: event.runtime + .pause_reasons as ToolConfirmationPauseReason[], + status: existingRuntime?.confirmation.status ?? { + wasInteracted: false, + confirmationStatus: true, + }, + }, + snapshot_received: true, + task_widget_expanded: existingRuntime?.task_widget_expanded ?? false, + last_applied_seq: event.seq, + message_index_by_id: rebuildMessageIndexById(snapshotMessages), + }; + + state.threads[chat_id] = newRt; + + if (!isTaskChat && !state.open_thread_ids.includes(chat_id)) { + state.open_thread_ids.push(chat_id); + } + if (!state.current_thread_id) { + state.current_thread_id = chat_id; + } + break; + } + + case "thread_updated": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? 
parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + const { type: _, ...params } = event; + if ("model" in params && typeof params.model === "string") + rt.thread.model = params.model; + if ("mode" in params && typeof params.mode === "string") { + rt.thread.mode = normalizeLegacyMode(params.mode); + } + if ( + "boost_reasoning" in params && + typeof params.boost_reasoning === "boolean" + ) + rt.thread.boost_reasoning = params.boost_reasoning; + if ("tool_use" in params && typeof params.tool_use === "string") { + rt.thread.tool_use = isToolUse(params.tool_use) + ? params.tool_use + : rt.thread.tool_use; + } + if ("context_tokens_cap" in params) { + rt.thread.context_tokens_cap = + params.context_tokens_cap == null + ? undefined + : (params.context_tokens_cap as number); + } + if ( + "include_project_info" in params && + typeof params.include_project_info === "boolean" + ) + rt.thread.include_project_info = params.include_project_info; + if ( + "checkpoints_enabled" in params && + typeof params.checkpoints_enabled === "boolean" + ) + rt.thread.checkpoints_enabled = params.checkpoints_enabled; + if ( + "auto_approve_editing_tools" in params && + typeof params.auto_approve_editing_tools === "boolean" + ) + rt.thread.auto_approve_editing_tools = + params.auto_approve_editing_tools; + if ( + "auto_approve_dangerous_commands" in params && + typeof params.auto_approve_dangerous_commands === "boolean" + ) + rt.thread.auto_approve_dangerous_commands = + params.auto_approve_dangerous_commands; + if ("reasoning_effort" in params) { + rt.thread.reasoning_effort = + params.reasoning_effort == null + ? undefined + : (params.reasoning_effort as ChatThread["reasoning_effort"]); + } + if ("thinking_budget" in params) { + rt.thread.thinking_budget = + params.thinking_budget == null + ? undefined + : (params.thinking_budget as number); + } + if ("temperature" in params) { + rt.thread.temperature = + params.temperature == null + ? undefined + : (params.temperature as number); + } + if ("frequency_penalty" in params) { + rt.thread.frequency_penalty = + params.frequency_penalty == null + ? undefined + : (params.frequency_penalty as number); + } + if ("max_tokens" in params) { + rt.thread.max_tokens = + params.max_tokens == null + ? undefined + : (params.max_tokens as number); + } + if ("parallel_tool_calls" in params) { + rt.thread.parallel_tool_calls = + params.parallel_tool_calls == null + ? undefined + : (params.parallel_tool_calls as boolean); + } + if ("task_meta" in params && params.task_meta != null) { + rt.thread.task_meta = params.task_meta as ChatThread["task_meta"]; + rt.thread.is_task_chat = true; + state.open_thread_ids = state.open_thread_ids.filter( + (id) => id !== chat_id, + ); + } + rt.last_applied_seq = event.seq; + break; + } + + case "message_added": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + const msg = normalizeMessage(event.message); + const messageId = "message_id" in msg ? msg.message_id : null; + if (messageId) { + const existingIdx = findMessageIndexById(rt, messageId); + if (existingIdx >= 0) { + const existing = rt.thread.messages[existingIdx]; + if (isAssistantMessage(existing) && isAssistantMessage(msg)) { + const merged: AssistantMessage = { + ...msg, + tool_calls: msg.tool_calls ?? 
existing.tool_calls, + server_executed_tools: + msg.server_executed_tools ?? existing.server_executed_tools, + server_content_blocks: + msg.server_content_blocks ?? existing.server_content_blocks, + reasoning_content: + msg.reasoning_content ?? existing.reasoning_content, + thinking_blocks: + msg.thinking_blocks ?? existing.thinking_blocks, + citations: msg.citations ?? existing.citations, + usage: msg.usage ?? existing.usage, + extra: msg.extra ?? existing.extra, + finish_reason: msg.finish_reason ?? existing.finish_reason, + }; + rt.thread.messages[existingIdx] = merged; + } else { + rt.thread.messages[existingIdx] = msg; + } + rt.message_index_by_id = rebuildMessageIndexById( + rt.thread.messages, + ); + rt.last_applied_seq = event.seq; + break; + } + } + const clampedIndex = Math.max( + 0, + Math.min(event.index, rt.thread.messages.length), + ); + rt.thread.messages.splice(clampedIndex, 0, msg); + rt.message_index_by_id = rebuildMessageIndexById(rt.thread.messages); + rt.last_applied_seq = event.seq; + break; + } + + case "message_updated": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + const idx = findMessageIndexById(rt, event.message_id); + if (idx >= 0) { + rt.thread.messages[idx] = normalizeMessage(event.message); + rt.message_index_by_id = rebuildMessageIndexById(rt.thread.messages); + } + rt.last_applied_seq = event.seq; + break; + } + + case "message_removed": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + rt.thread.messages = rt.thread.messages.filter( + (m) => !("message_id" in m) || m.message_id !== event.message_id, + ); + rt.message_index_by_id = rebuildMessageIndexById(rt.thread.messages); + rt.last_applied_seq = event.seq; + break; + } + + case "messages_truncated": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + const clampedIndex = Math.max( + 0, + Math.min(event.from_index, rt.thread.messages.length), + ); + rt.thread.messages = rt.thread.messages.slice(0, clampedIndex); + rt.message_index_by_id = rebuildMessageIndexById(rt.thread.messages); + rt.last_applied_seq = event.seq; + break; + } + + case "stream_started": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + const existingIdx = findMessageIndexById(rt, event.message_id); + rt.streaming = true; + rt.waiting_for_response = true; + rt.session_state = "generating"; + if (existingIdx < 0) { + rt.thread.messages.push({ + role: "assistant", + content: "", + message_id: event.message_id, + } as ChatMessages[number]); + rt.message_index_by_id = rebuildMessageIndexById(rt.thread.messages); + } + rt.last_applied_seq = event.seq; + break; + } + + case "stream_delta": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? 
parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + const msgIdx = findMessageIndexById(rt, event.message_id); + if (msgIdx >= 0) { + const msg = rt.thread.messages[msgIdx]; + rt.thread.messages[msgIdx] = applyDeltaOps( + msg as Parameters<typeof applyDeltaOps>[0], + event.ops, + ); + state.stream_version = (state.stream_version + 1) % 1_000_000; + } + rt.last_applied_seq = event.seq; + break; + } + + case "stream_finished": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + rt.streaming = false; + if ( + event.finish_reason === "stop" || + event.finish_reason === "length" || + event.finish_reason === "abort" || + event.finish_reason === "error" + ) { + rt.waiting_for_response = false; + rt.session_state = "idle"; + } else { + // tool_calls or other finish reasons: tools about to execute + rt.session_state = "executing_tools"; + } + const msgIdx = findMessageIndexById(rt, event.message_id); + if (msgIdx >= 0 && isAssistantMessage(rt.thread.messages[msgIdx])) { + const msg = rt.thread.messages[msgIdx] as AssistantMessage; + if (event.finish_reason && !msg.finish_reason) { + msg.finish_reason = + event.finish_reason as AssistantMessage["finish_reason"]; + } + } + rt.last_applied_seq = event.seq; + break; + } + + case "pause_required": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + rt.streaming = false; + rt.waiting_for_response = false; + rt.session_state = "paused"; + rt.confirmation.pause = true; + rt.confirmation.pause_reasons = + event.reasons as ToolConfirmationPauseReason[]; + rt.confirmation.status.wasInteracted = false; + rt.confirmation.status.confirmationStatus = false; + rt.last_applied_seq = event.seq; + break; + } + + case "pause_cleared": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + rt.confirmation.pause = false; + rt.confirmation.pause_reasons = []; + rt.confirmation.status.wasInteracted = false; + rt.confirmation.status.confirmationStatus = true; + rt.last_applied_seq = event.seq; + break; + } + + case "ide_tool_required": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + rt.last_applied_seq = event.seq; + break; + } + + case "subchat_update": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? 
parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + for (const msg of rt.thread.messages) { + if (!isAssistantMessage(msg) || !msg.tool_calls) continue; + const tc = msg.tool_calls.find((t) => t.id === event.tool_call_id); + if (tc) { + if (event.subchat_id === "") { + tc.subchat = undefined; + tc.subchat_log = []; + tc.attached_files = []; + } else { + tc.subchat = event.subchat_id; + const isToolNotification = event.subchat_id.includes("/tool:"); + if (!isToolNotification) { + // Streaming progress: keep only the latest entry so UI doesn't + // accumulate stale partial text. + tc.subchat_log = [event.subchat_id]; + } + } + if (event.attached_files && event.attached_files.length > 0) { + tc.attached_files = [ + ...(tc.attached_files ?? []), + ...event.attached_files.filter( + (f) => !tc.attached_files?.includes(f), + ), + ]; + } + break; + } + } + rt.last_applied_seq = event.seq; + break; + } + + case "ack": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + rt.last_applied_seq = event.seq; + break; + } + + case "queue_updated": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + rt.queued_items = + event.queued_items as ChatThreadRuntime["queued_items"]; + rt.last_applied_seq = event.seq; + break; + } + + case "runtime_updated": { + if (!rt) break; + const eventSeq = parseEventSeq(event.seq); + const lastSeq = + rt.last_applied_seq != null + ? 
parseEventSeq(rt.last_applied_seq) + : null; + if (eventSeq != null && lastSeq != null && eventSeq <= lastSeq) { + break; + } + const newState = event.state; + rt.session_state = newState; + + // Update streaming/waiting flags based on state + switch (newState) { + case "idle": + case "completed": + case "waiting_user_input": + case "error": + rt.streaming = false; + rt.waiting_for_response = false; + break; + case "generating": + rt.streaming = true; + rt.waiting_for_response = true; + break; + case "executing_tools": + case "waiting_ide": + rt.streaming = false; + rt.waiting_for_response = true; + break; + case "paused": + rt.streaming = false; + rt.waiting_for_response = false; + // Note: pause_reasons are set via pause_required event + break; + } + + // Update error state + if (newState === "error" && event.error) { + rt.error = event.error; + } else if (newState !== "error") { + rt.error = null; + } + rt.last_applied_seq = event.seq; + break; + } + } + }); + + builder.addCase(requestSseRefresh, (state, action) => { + state.sse_refresh_requested = action.payload.chatId; + }); + + builder.addCase(clearSseRefreshRequest, (state) => { + state.sse_refresh_requested = null; }); builder.addMatcher( capsApi.endpoints.getCaps.matchFulfilled, (state, action) => { const defaultModel = action.payload.chat_default_model; + const rt = getCurrentRuntime(state); + if (!rt) return; - const model = state.thread.model || defaultModel; + const model = rt.thread.model || defaultModel; if (!(model in action.payload.chat_models)) return; const currentModelMaximumContextTokens = action.payload.chat_models[model].n_ctx; - state.thread.currentMaximumContextTokens = - currentModelMaximumContextTokens; + rt.thread.currentMaximumContextTokens = currentModelMaximumContextTokens; if ( - state.thread.context_tokens_cap === undefined || - state.thread.context_tokens_cap > currentModelMaximumContextTokens + rt.thread.context_tokens_cap === undefined || + rt.thread.context_tokens_cap > currentModelMaximumContextTokens ) { - state.thread.context_tokens_cap = currentModelMaximumContextTokens; + rt.thread.context_tokens_cap = currentModelMaximumContextTokens; } }, ); @@ -523,8 +1501,11 @@ export const chatReducer = createReducer(initialState, (builder) => { builder.addMatcher( commandsApi.endpoints.getCommandPreview.matchFulfilled, (state, action) => { - state.thread.currentMaximumContextTokens = action.payload.number_context; - state.thread.currentMessageContextTokens = action.payload.current_context; // assuming that this number is amount of tokens per current message + const rt = getCurrentRuntime(state); + if (rt) { + rt.thread.currentMaximumContextTokens = action.payload.number_context; + rt.thread.currentMessageContextTokens = action.payload.current_context; + } }, ); }); @@ -541,7 +1522,7 @@ export function maybeAppendToolCallResultFromIdeToMessages( if (hasDiff) return; const maybeToolResult = messages.find( - (d) => isToolMessage(d) && d.content.tool_call_id === toolCallId, + (d) => isToolMessage(d) && d.tool_call_id === toolCallId, ); const toolCalls = messages.reduce<ToolCall[]>((acc, message) => { @@ -561,16 +1542,16 @@ export function maybeAppendToolCallResultFromIdeToMessages( if ( maybeToolResult && isToolMessage(maybeToolResult) && - typeof maybeToolResult.content.content === "string" + typeof maybeToolResult.content === "string" ) { - maybeToolResult.content.content = message; + maybeToolResult.content = message; return; } else if ( maybeToolResult && isToolMessage(maybeToolResult) && - 
isMultiModalToolResult(maybeToolResult.content) + Array.isArray(maybeToolResult.content) ) { - maybeToolResult.content.content.push({ + maybeToolResult.content.push({ m_type: "text", m_content: message, }); @@ -585,12 +1566,9 @@ export function maybeAppendToolCallResultFromIdeToMessages( if (assistantMessageIndex === -1) return; const toolMessage: ToolMessage = { role: "tool", - content: { - content: message, - tool_call_id: toolCallId, - // assuming, that tool_failed is always false at this point - tool_failed: false, - }, + tool_call_id: toolCallId, + content: message, + tool_failed: false, }; messages.splice(assistantMessageIndex + 1, 0, toolMessage); diff --git a/refact-agent/gui/src/features/Chat/Thread/selectors.ts b/refact-agent/gui/src/features/Chat/Thread/selectors.ts index 6a10a1050..8001fd862 100644 --- a/refact-agent/gui/src/features/Chat/Thread/selectors.ts +++ b/refact-agent/gui/src/features/Chat/Thread/selectors.ts @@ -1,81 +1,279 @@ import { RootState } from "../../../app/store"; import { createSelector } from "@reduxjs/toolkit"; import { - CompressionStrength, isAssistantMessage, isDiffMessage, isToolMessage, isUserMessage, + ChatMessages, + ToolResult, + ToolMessage, } from "../../../services/refact/types"; import { takeFromLast } from "../../../utils/takeFromLast"; +import { + ChatThreadRuntime, + QueuedItem, + ThreadConfirmation, + ImageFile, + TodoItem, + TodoStatus, +} from "./types"; +import type { SessionState } from "../../../utils/sessionStatus"; + +const EMPTY_MESSAGES: ChatMessages = []; +const EMPTY_QUEUED: QueuedItem[] = []; +const EMPTY_PAUSE_REASONS: ThreadConfirmation["pause_reasons"] = []; +const EMPTY_IMAGES: ImageFile[] = []; +const DEFAULT_NEW_CHAT_SUGGESTED = { wasSuggested: false } as const; +const DEFAULT_CONFIRMATION: ThreadConfirmation = { + pause: false, + pause_reasons: [], + status: { wasInteracted: false, confirmationStatus: true }, +}; +const DEFAULT_CONFIRMATION_STATUS = { + wasInteracted: false, + confirmationStatus: true, +} as const; + +function deriveSessionStateFromRuntime( + rt: ChatThreadRuntime | undefined, +): SessionState | undefined { + if (!rt) return undefined; + // Use stored session_state if available (for waiting_user_input, completed, etc.) + if (rt.session_state) { + return rt.session_state as SessionState; + } + // Fallback to derived state from booleans + if (rt.error) return "error"; + if (rt.confirmation.pause) return "paused"; + if (rt.streaming) return "generating"; + if (rt.waiting_for_response) return "executing_tools"; + return "idle"; +} + +export const selectCurrentThreadId = (state: RootState) => + state.chat.current_thread_id; +export const selectOpenThreadIds = (state: RootState) => + state.chat.open_thread_ids; +export const selectAllThreads = ( + state: RootState, +): Record<string, ChatThreadRuntime | undefined> => state.chat.threads; + +export type TabDisplayData = { + id: string; + title: string; + session_state?: string; + mode?: string; +}; + +export const selectTabsDisplayData = createSelector( + [ + selectOpenThreadIds, + selectAllThreads, + (state: RootState) => state.history.chats, + ], + (openIds, threads, historyChats): TabDisplayData[] => + openIds.map((id) => { + const runtime = threads[id]; + const historyItem = historyChats[id] as + | (typeof historyChats)[string] + | undefined; + const liveSessionState = deriveSessionStateFromRuntime(runtime); + return { + id, + title: runtime?.thread.title ?? historyItem?.title ?? "New Chat", + session_state: liveSessionState ?? 
historyItem?.session_state, + mode: runtime?.thread.mode ?? historyItem?.mode, + }; + }), +); + +export const selectRuntimeById = ( + state: RootState, + chatId: string, +): ChatThreadRuntime | null => { + return state.chat.threads[chatId] ?? null; +}; + +export const selectCurrentRuntime = ( + state: RootState, +): ChatThreadRuntime | null => + state.chat.threads[state.chat.current_thread_id] ?? null; + +export const selectThreadById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.thread ?? null; + +export const selectThread = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread ?? null; + +export const selectThreadTitle = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.title; + +export const selectChatId = (state: RootState) => state.chat.current_thread_id; + +export const selectModel = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.model ?? ""; + +export const selectMessages = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.messages ?? + EMPTY_MESSAGES; + +export const selectMessagesById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.thread.messages ?? EMPTY_MESSAGES; -export const selectThread = (state: RootState) => state.chat.thread; -export const selectThreadTitle = (state: RootState) => state.chat.thread.title; -export const selectChatId = (state: RootState) => state.chat.thread.id; -export const selectModel = (state: RootState) => state.chat.thread.model; -export const selectMessages = (state: RootState) => state.chat.thread.messages; export const selectToolUse = (state: RootState) => state.chat.tool_use; + export const selectThreadToolUse = (state: RootState) => - state.chat.thread.tool_use; -export const selectAutomaticPatch = (state: RootState) => - state.chat.thread.automatic_patch; + state.chat.threads[state.chat.current_thread_id]?.thread.tool_use; + +export const selectAutoApproveEditingTools = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread + .auto_approve_editing_tools ?? false; + +export const selectAutoApproveDangerousCommands = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread + .auto_approve_dangerous_commands ?? 
false; export const selectCheckpointsEnabled = (state: RootState) => state.chat.checkpoints_enabled; export const selectThreadBoostReasoning = (state: RootState) => - state.chat.thread.boost_reasoning; + state.chat.threads[state.chat.current_thread_id]?.thread.boost_reasoning; export const selectIncludeProjectInfo = (state: RootState) => - state.chat.thread.include_project_info; + state.chat.threads[state.chat.current_thread_id]?.thread.include_project_info; export const selectContextTokensCap = (state: RootState) => - state.chat.thread.context_tokens_cap; + state.chat.threads[state.chat.current_thread_id]?.thread.context_tokens_cap; + +export const selectReasoningEffort = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.reasoning_effort; + +export const selectThinkingBudget = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.thinking_budget; + +export const selectTemperature = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.temperature; + +export const selectFrequencyPenalty = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.frequency_penalty; + +export const selectMaxTokens = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.max_tokens; + +export const selectParallelToolCalls = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.thread.parallel_tool_calls; -// TBD: only used when `/links` suggests a new chat. export const selectThreadNewChatSuggested = (state: RootState) => - state.chat.thread.new_chat_suggested; + state.chat.threads[state.chat.current_thread_id]?.thread.new_chat_suggested ?? + DEFAULT_NEW_CHAT_SUGGESTED; + export const selectThreadMaximumTokens = (state: RootState) => - state.chat.thread.currentMaximumContextTokens; + state.chat.threads[state.chat.current_thread_id]?.thread + .currentMaximumContextTokens; + +export const selectEffectiveMaxContextTokens = (state: RootState) => { + const thread = state.chat.threads[state.chat.current_thread_id]?.thread; + if (!thread) return undefined; + const modelMax = thread.currentMaximumContextTokens; + const cap = thread.context_tokens_cap; + if (cap && cap > 0) { + return modelMax && modelMax > 0 ? Math.min(cap, modelMax) : cap; + } + return modelMax; +}; + export const selectThreadCurrentMessageTokens = (state: RootState) => - state.chat.thread.currentMessageContextTokens; + state.chat.threads[state.chat.current_thread_id]?.thread + .currentMessageContextTokens; + export const selectIsWaiting = (state: RootState) => - state.chat.waiting_for_response; + state.chat.threads[state.chat.current_thread_id]?.waiting_for_response ?? + false; + +export const selectIsWaitingById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.waiting_for_response ?? false; + export const selectAreFollowUpsEnabled = (state: RootState) => state.chat.follow_ups_enabled; -export const selectIsTitleGenerationEnabled = (state: RootState) => - state.chat.title_generation_enabled; -export const selectUseCompression = (state: RootState) => - state.chat.use_compression; -export const selectIsStreaming = (state: RootState) => state.chat.streaming; -export const selectPreventSend = (state: RootState) => state.chat.prevent_send; -export const selectChatError = (state: RootState) => state.chat.error; + +export const selectIsStreaming = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.streaming ?? 
false; + +export const selectIsStreamingById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.streaming ?? false; + +export const selectSnapshotReceived = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.snapshot_received ?? false; + +export const selectSnapshotReceivedById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.snapshot_received ?? false; + +export const selectPreventSend = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.prevent_send ?? false; + +export const selectPreventSendById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.prevent_send ?? false; + +export const selectChatError = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.error ?? null; + +export const selectChatErrorById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.error ?? null; + export const selectSendImmediately = (state: RootState) => - state.chat.send_immediately; + state.chat.threads[state.chat.current_thread_id]?.send_immediately ?? false; + export const getSelectedSystemPrompt = (state: RootState) => state.chat.system_prompt; -export const toolMessagesSelector = createSelector( - selectMessages, - (messages) => { - return messages.filter(isToolMessage); - }, +export const selectAnyThreadStreaming = createSelector( + [selectAllThreads], + (threads) => Object.values(threads).some((rt) => rt?.streaming), +); + +export const selectStreamingThreadIds = createSelector( + [selectAllThreads], + (threads) => + Object.entries(threads) + .filter(([, rt]) => rt?.streaming) + .map(([id]) => id), +); + +export const toolMessagesSelector = createSelector(selectMessages, (messages) => + messages.filter(isToolMessage), ); export const selectToolResultById = createSelector( [toolMessagesSelector, (_, id?: string) => id], (messages, id) => { - return messages.find((message) => message.content.tool_call_id === id) - ?.content; + if (!id) return undefined; + for (let i = messages.length - 1; i >= 0; i--) { + const m = messages[i]; + if (m.tool_call_id === id) { + return { + tool_call_id: m.tool_call_id, + content: m.content, + tool_failed: m.tool_failed, + } as ToolResult; + } + } + return undefined; }, ); - export const selectManyToolResultsByIds = (ids: string[]) => - createSelector(toolMessagesSelector, (messages) => { - return messages - .filter((message) => ids.includes(message.content.tool_call_id)) - .map((toolMessage) => toolMessage.content); - }); + createSelector(toolMessagesSelector, (messages) => + messages + .filter((message) => ids.includes(message.tool_call_id)) + .map( + (msg) => + ({ + tool_call_id: msg.tool_call_id, + content: msg.content, + tool_failed: msg.tool_failed, + }) as ToolResult, + ), + ); const selectDiffMessages = createSelector(selectMessages, (messages) => messages.filter(isDiffMessage), @@ -83,96 +281,316 @@ const selectDiffMessages = createSelector(selectMessages, (messages) => export const selectDiffMessageById = createSelector( [selectDiffMessages, (_, id?: string) => id], - (messages, id) => { - return messages.find((message) => message.tool_call_id === id); - }, + (messages, id) => messages.find((message) => message.tool_call_id === id), ); export const selectManyDiffMessageByIds = (ids: string[]) => - createSelector(selectDiffMessages, (diffs) => { - return diffs.filter((message) => ids.includes(message.tool_call_id)); - }); + createSelector(selectDiffMessages, (diffs) => + diffs.filter((message) => 
ids.includes(message.tool_call_id)), + ); export const getSelectedToolUse = (state: RootState) => - state.chat.thread.tool_use; + state.chat.threads[state.chat.current_thread_id]?.thread.tool_use; export const selectIntegration = createSelector( selectThread, - (thread) => thread.integration, + (thread) => thread?.integration, ); export const selectThreadMode = createSelector( selectThread, - (thread) => thread.mode, + (thread) => thread?.mode, ); -export const selectLastSentCompression = createSelector( - selectMessages, - (messages) => { - const lastCompression = messages.reduce<null | CompressionStrength>( - (acc, message) => { - if (isUserMessage(message) && message.compression_strength) { - return message.compression_strength; - } - if (isToolMessage(message) && message.content.compression_strength) { - return message.content.compression_strength; - } - return acc; - }, - null, - ); - - return lastCompression; - }, -); +export const selectQueuedItems = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.queued_items ?? + EMPTY_QUEUED; -export const selectQueuedMessages = (state: RootState) => - state.chat.queued_messages; +export const selectQueuedItemsById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.queued_items ?? EMPTY_QUEUED; -export const selectQueuedMessagesCount = createSelector( - selectQueuedMessages, +export const selectQueuedItemsCount = createSelector( + selectQueuedItems, (queued) => queued.length, ); -export const selectHasQueuedMessages = createSelector( - selectQueuedMessages, +export const selectHasQueuedItems = createSelector( + selectQueuedItems, (queued) => queued.length > 0, ); +function hasUncalledToolsInMessages( + messages: ReturnType<typeof selectMessages>, +): boolean { + if (messages.length === 0) return false; + const tailMessages = takeFromLast(messages, isUserMessage); + + const toolCalls = tailMessages.reduce<string[]>((acc, cur) => { + if (!isAssistantMessage(cur)) return acc; + if (!cur.tool_calls || cur.tool_calls.length === 0) return acc; + const curToolCallIds = cur.tool_calls + .map((toolCall) => toolCall.id) + .filter( + (id): id is string => id !== undefined && !id.startsWith("srvtoolu_"), + ); + return [...acc, ...curToolCallIds]; + }, []); + + if (toolCalls.length === 0) return false; + + const toolMessages = tailMessages + .map((msg) => { + if (isToolMessage(msg)) return msg.tool_call_id; + if ("tool_call_id" in msg && typeof msg.tool_call_id === "string") + return msg.tool_call_id; + return undefined; + }) + .filter((id): id is string => typeof id === "string"); + + return toolCalls.some((toolCallId) => !toolMessages.includes(toolCallId)); +} + +export const selectHasUncalledToolsById = ( + state: RootState, + chatId: string, +): boolean => hasUncalledToolsInMessages(selectMessagesById(state, chatId)); + export const selectHasUncalledTools = createSelector( selectMessages, - (messages) => { - if (messages.length === 0) return false; - const tailMessages = takeFromLast(messages, isUserMessage); - - const toolCalls = tailMessages.reduce<string[]>((acc, cur) => { - if (!isAssistantMessage(cur)) return acc; - if (!cur.tool_calls || cur.tool_calls.length === 0) return acc; - const curToolCallIds = cur.tool_calls - .map((toolCall) => toolCall.id) - .filter((id) => id !== undefined); - - return [...acc, ...curToolCallIds]; - }, []); - - if (toolCalls.length === 0) return false; - - const toolMessages = tailMessages - .map((msg) => { - if (isToolMessage(msg)) { - return msg.content.tool_call_id; - } - 
if ("tool_call_id" in msg && typeof msg.tool_call_id === "string") { - return msg.tool_call_id; - } - return undefined; - }) - .filter((id): id is string => typeof id === "string"); + hasUncalledToolsInMessages, +); + +export const selectThreadConfirmation = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.confirmation ?? + DEFAULT_CONFIRMATION; + +export const selectThreadConfirmationById = ( + state: RootState, + chatId: string, +) => state.chat.threads[chatId]?.confirmation ?? DEFAULT_CONFIRMATION; + +export const selectThreadPauseReasons = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.confirmation + .pause_reasons ?? EMPTY_PAUSE_REASONS; + +export const selectThreadPause = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.confirmation.pause ?? false; + +export const selectThreadPauseById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.confirmation.pause ?? false; + +export const selectThreadConfirmationStatus = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.confirmation.status ?? + DEFAULT_CONFIRMATION_STATUS; + +export const selectThreadImages = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.attached_images ?? + EMPTY_IMAGES; + +export const selectThreadImagesById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.attached_images ?? EMPTY_IMAGES; + +const EMPTY_TEXT_FILES: import("./types").TextFile[] = []; + +export const selectThreadTextFiles = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.attached_text_files ?? + EMPTY_TEXT_FILES; + +export const selectThreadTextFilesById = (state: RootState, chatId: string) => + state.chat.threads[chatId]?.attached_text_files ?? EMPTY_TEXT_FILES; + +export const selectSseRefreshRequested = (state: RootState) => + state.chat.sse_refresh_requested; + +export const selectStreamVersion = (state: RootState): number => + state.chat.stream_version; + +// Task Progress Widget selectors + +export const selectTaskWidgetExpanded = (state: RootState) => + state.chat.threads[state.chat.current_thread_id]?.task_widget_expanded ?? + false; + +export const selectTaskWidgetExpandedById = ( + state: RootState, + chatId: string, +) => state.chat.threads[chatId]?.task_widget_expanded ?? 
false; + +function normalizeTaskStatus(status: unknown): TodoStatus | null { + if (typeof status !== "string") return null; + switch (status.toLowerCase()) { + case "pending": + return "pending"; + case "in_progress": + case "in-progress": + case "inprogress": + return "in_progress"; + case "completed": + case "done": + case "complete": + return "completed"; + case "failed": + case "error": + return "failed"; + default: + return null; + } +} + +function sanitizeText(text: string, maxLen: number): string { + return ( + text + // eslint-disable-next-line no-control-regex + .replace(/[\x00-\x1F\x7F]/g, "") + .trim() + .slice(0, maxLen) + ); +} - const hasUnsentTools = toolCalls.some( - (toolCallId) => !toolMessages.includes(toolCallId), +function parseTasksFromArgs(argsStr: string): TodoItem[] | null { + try { + const args = JSON.parse(argsStr) as unknown; + if (!args || typeof args !== "object") return null; + const tasksArray = (args as Record<string, unknown>).tasks; + if (!Array.isArray(tasksArray)) return null; + + if (tasksArray.length === 0) return []; + + const result: TodoItem[] = []; + const seenIds = new Set<string>(); + + for (const item of tasksArray) { + if (!item || typeof item !== "object") continue; + const t = item as Record<string, unknown>; + + const rawId = + typeof t.id === "string" + ? t.id + : typeof t.id === "number" + ? String(t.id) + : null; + if (!rawId) continue; + + const id = sanitizeText(rawId, 50); + if (!id || seenIds.has(id)) continue; + seenIds.add(id); + + const rawContent = typeof t.content === "string" ? t.content : null; + if (!rawContent) continue; + + const content = sanitizeText(rawContent, 500); + if (!content) continue; + + const status = normalizeTaskStatus(t.status); + if (!status) continue; + + result.push({ id, content, status }); + } + return result.length > 0 ? 
result : null; + } catch { + return null; + } +} + +export function deriveTasksFromMessages( + messages: ChatMessages, + toolMessages: ToolMessage[], +): TodoItem[] { + const successfulToolIds = new Set( + toolMessages.filter((m) => !m.tool_failed).map((m) => m.tool_call_id), + ); + + for (let i = messages.length - 1; i >= 0; i--) { + const msg = messages[i]; + if (!isAssistantMessage(msg) || !msg.tool_calls) continue; + + for (let j = msg.tool_calls.length - 1; j >= 0; j--) { + const tc = msg.tool_calls[j]; + if (tc.function.name !== "tasks_set" || !tc.id) continue; + if (!successfulToolIds.has(tc.id)) continue; + + const parsed = parseTasksFromArgs(tc.function.arguments); + if (parsed !== null) return parsed; + } + } + + return []; +} + +export const selectCurrentTasks = createSelector( + [selectMessages, toolMessagesSelector], + (messages, toolMessages): TodoItem[] => + deriveTasksFromMessages(messages, toolMessages), +); + +export const selectCurrentTasksById = (state: RootState, chatId: string) => { + const messages = selectMessagesById(state, chatId); + const toolMessages = messages.filter(isToolMessage); + return deriveTasksFromMessages(messages, toolMessages); +}; + +export const selectHasTasks = createSelector( + [selectCurrentTasks], + (tasks) => tasks.length > 0, +); + +export const selectTasksEverUsed = createSelector( + [selectMessages, toolMessagesSelector], + (messages, toolMessages): boolean => { + const successfulToolIds = new Set( + toolMessages.filter((m) => !m.tool_failed).map((m) => m.tool_call_id), ); - return hasUnsentTools; + for (const msg of messages) { + if (!isAssistantMessage(msg) || !msg.tool_calls) continue; + for (const tc of msg.tool_calls) { + if ( + tc.function.name === "tasks_set" && + tc.id && + successfulToolIds.has(tc.id) + ) { + return true; + } + } + } + return false; + }, +); + +export const selectTaskProgress = createSelector( + [selectCurrentTasks], + (tasks): { done: number; total: number; activeTitle?: string } => { + const done = tasks.filter((t) => t.status === "completed").length; + const active = tasks.find((t) => t.status === "in_progress"); + return { + done, + total: tasks.length, + activeTitle: active?.content, + }; }, ); + +export type TaskProgressInfo = { + done: number; + total: number; + failed: number; +}; + +/** + * Compute task progress from messages array. + * Useful for history items that have messages but aren't in Redux state. 
+ */ +export function getTaskProgressFromMessages( + messages: ChatMessages, +): TaskProgressInfo | null { + const toolMessages = messages.filter(isToolMessage); + const tasks = deriveTasksFromMessages(messages, toolMessages); + + if (tasks.length === 0) return null; + + return { + done: tasks.filter((t) => t.status === "completed").length, + total: tasks.length, + failed: tasks.filter((t) => t.status === "failed").length, + }; +} diff --git a/refact-agent/gui/src/features/Chat/Thread/types.ts b/refact-agent/gui/src/features/Chat/Thread/types.ts index 25091e93e..3822aa153 100644 --- a/refact-agent/gui/src/features/Chat/Thread/types.ts +++ b/refact-agent/gui/src/features/Chat/Thread/types.ts @@ -1,13 +1,38 @@ -import { Usage } from "../../../services/refact"; +import { ToolConfirmationPauseReason, Usage } from "../../../services/refact"; import { SystemPrompts } from "../../../services/refact/prompts"; -import { ChatMessages, UserMessage } from "../../../services/refact/types"; +import { ChatMessages } from "../../../services/refact/types"; import { parseOrElse } from "../../../utils/parseOrElse"; -export type QueuedUserMessage = { +export type ImageFile = { + name: string; + content: string | ArrayBuffer | null; + type: string; +}; + +export type TextFile = { + name: string; + content: string; +}; + +export type ToolConfirmationStatus = { + wasInteracted: boolean; + confirmationStatus: boolean; +}; + +// Task Progress Widget types +export type TodoStatus = "pending" | "in_progress" | "completed" | "failed"; + +export type TodoItem = { id: string; - message: UserMessage; - createdAt: number; - priority?: boolean; + content: string; + status: TodoStatus; +}; + +export type QueuedItem = { + client_request_id: string; + priority: boolean; + command_type: string; + preview: string; }; export type IntegrationMeta = { @@ -16,6 +41,16 @@ export type IntegrationMeta = { project?: string; shouldIntermediatePageShowUp?: boolean; }; + +export type ReasoningEffort = + | "none" + | "minimal" + | "low" + | "medium" + | "high" + | "xhigh" + | "max"; + export type ChatThread = { id: string; messages: ChatMessages; @@ -24,20 +59,45 @@ export type ChatThread = { createdAt?: string; updatedAt?: string; tool_use?: ToolUse; - read?: boolean; isTitleGenerated?: boolean; boost_reasoning?: boolean; + /** Reasoning effort level: "low", "medium", "high", "xhigh", or "max" */ + reasoning_effort?: ReasoningEffort; + /** Thinking budget in tokens (for Anthropic, Qwen, Gemini 2.5) */ + thinking_budget?: number; + /** Temperature for sampling (0-2) */ + temperature?: number; + /** Frequency penalty for sampling (-2 to 2) */ + frequency_penalty?: number; + /** Maximum tokens for response */ + max_tokens?: number; + /** Whether to allow parallel tool calls */ + parallel_tool_calls?: boolean; integration?: IntegrationMeta | null; - mode?: LspChatMode; + mode?: ChatModeId; project_name?: string; last_user_message_id?: string; new_chat_suggested: SuggestedChat; - automatic_patch?: boolean; + auto_approve_editing_tools?: boolean; + auto_approve_dangerous_commands?: boolean; currentMaximumContextTokens?: number; currentMessageContextTokens?: number; increase_max_tokens?: boolean; include_project_info?: boolean; context_tokens_cap?: number; + checkpoints_enabled?: boolean; + /** If true, this chat belongs to a task workspace and should not appear in regular chat tabs */ + is_task_chat?: boolean; + /** Task metadata for task-related chats */ + task_meta?: { + task_id: string; + role: string; + agent_id?: string; + card_id?: 
string; + }; + + /** OpenAI Responses API multi-turn state: link next request to the previous response */ + previous_response_id?: string; }; export type SuggestedChat = { @@ -47,22 +107,76 @@ export type SuggestedChat = { export type ToolUse = "quick" | "explore" | "agent"; -export type Chat = { - streaming: boolean; +export type ChatModeId = string; + +export const DEFAULT_MODE: ChatModeId = "agent"; + +export function normalizeLegacyMode(mode: string | undefined): ChatModeId { + if (!mode) return DEFAULT_MODE; + const upper = mode.toUpperCase(); + switch (upper) { + case "NO_TOOLS": + return "explore"; + case "EXPLORE": + return "explore"; + case "AGENT": + return "agent"; + case "CONFIGURE": + return "configurator"; + case "PROJECT_SUMMARY": + return "project_summary"; + case "TASK_PLANNER": + return "task_planner"; + case "TASK_AGENT": + return "task_agent"; + default: + if (mode === mode.toLowerCase()) return mode; + return DEFAULT_MODE; + } +} + +export type ThreadConfirmation = { + pause: boolean; + pause_reasons: ToolConfirmationPauseReason[]; + status: ToolConfirmationStatus; +}; + +export type ChatThreadRuntime = { thread: ChatThread; - error: null | string; - prevent_send: boolean; - checkpoints_enabled?: boolean; + streaming: boolean; waiting_for_response: boolean; - max_new_tokens?: number; - cache: Record<string, ChatThread>; + prevent_send: boolean; + error: string | null; + queued_items: QueuedItem[]; + send_immediately: boolean; + attached_images: ImageFile[]; + attached_text_files: TextFile[]; + confirmation: ThreadConfirmation; + /** Whether the initial snapshot has been received from the backend */ + snapshot_received: boolean; + /** Task progress widget expanded/collapsed state */ + task_widget_expanded: boolean; + /** Actual session state from backend (for waiting_user_input, completed, etc.) 
*/ + session_state?: string; + /** Last applied chat SSE event seq for duplicate/out-of-order protection */ + last_applied_seq?: string; + /** Fast lookup index from message_id to message index (rebuilt on snapshots/mutations) */ + message_index_by_id?: Record<string, number>; +}; + +export type Chat = { + current_thread_id: string; + open_thread_ids: string[]; + threads: Record<string, ChatThreadRuntime | undefined>; system_prompt: SystemPrompts; tool_use: ToolUse; - send_immediately: boolean; + checkpoints_enabled?: boolean; follow_ups_enabled?: boolean; - title_generation_enabled?: boolean; - use_compression?: boolean; - queued_messages: QueuedUserMessage[]; + max_new_tokens?: number; + /** When set, useChatSubscription should reconnect to get fresh state */ + sse_refresh_requested: string | null; + /** Increments on every stream_delta to force component re-renders */ + stream_version: number; }; export type PayloadWithId = { id: string }; @@ -121,42 +235,7 @@ export function isToolUse(str: string): str is ToolUse { return str === "quick" || str === "explore" || str === "agent"; } -export type LspChatMode = - | "NO_TOOLS" - | "EXPLORE" - | "AGENT" - | "CONFIGURE" - | "PROJECT_SUMMARY"; - -export function isLspChatMode(mode: string): mode is LspChatMode { - return ( - mode === "NO_TOOLS" || - mode === "EXPLORE" || - mode === "AGENT" || - mode === "CONFIGURE" || - mode === "PROJECT_SUMMARY" - ); -} - -export function chatModeToLspMode({ - toolUse, - mode, - defaultMode, -}: { - toolUse?: ToolUse; - mode?: LspChatMode; - defaultMode?: LspChatMode; -}): LspChatMode { - if (defaultMode) { - return defaultMode; - } - if (mode) { - return mode; - } - if (toolUse === "agent") return "AGENT"; - if (toolUse === "quick") return "NO_TOOLS"; - return "EXPLORE"; -} +export type LspChatMode = string; // Helper to detect server-executed tools (already executed by LLM provider) // These tools have IDs starting with "srvtoolu_" and should NOT be sent to backend for execution diff --git a/refact-agent/gui/src/features/Chat/Thread/utils.test.ts b/refact-agent/gui/src/features/Chat/Thread/utils.test.ts index deafd63ed..f372f0b74 100644 --- a/refact-agent/gui/src/features/Chat/Thread/utils.test.ts +++ b/refact-agent/gui/src/features/Chat/Thread/utils.test.ts @@ -1,1642 +1,6 @@ -import { describe, expect, test, vi } from "vitest"; -import { - ChatMessages, - ChatResponse, - PlainTextMessage, - PlainTextResponse, - UserMessage, - UserMessageResponse, - type ToolCall, -} from "../../../services/refact"; -import { - mergeToolCalls, - formatChatResponse, - consumeStream, - postProcessMessagesAfterStreaming, -} from "./utils"; - -describe("formatChatResponse", () => { - test("it should replace the last user message", () => { - const message: UserMessageResponse = { - id: "test", - content: " what is this for?\n", - role: "user", - }; - - const messages: ChatMessages = [ - { role: "user", content: "Hello" }, - { - role: "assistant", - content: "Hi", - tool_calls: [ - { - function: { - arguments: - '{"problem_statement":"What is the difference between the Toad and Frog classes?"}', - name: "locate", - }, - id: "call_6qxVYwV6MTcazl1Fy5pRlImi", - index: 0, - type: "function", - }, - ], - }, - { - role: "tool", - content: { - tool_call_id: "call_6qxVYwV6MTcazl1Fy5pRlImi", - content: "stuff", - tool_failed: false, - }, - }, - { - role: "context_file", - content: [ - { - file_content: "stuff", - file_name: "refact-chat-js/src/services/refact/chat.ts", - line1: 1, - line2: 85, - usefulness: 0, - }, - ], - }, - { - role: 
"assistant", - content: "test response", - }, - { - role: "user", - content: - "@file /Users/marc/Projects/refact-chat-js/src/__fixtures__/chat_diff.ts what is this for?\n", - }, - { - role: "context_file", - content: [ - { - file_content: "test content", - file_name: "refact-chat-js/src/__fixtures__/chat_diff.ts", - line1: 1, - line2: 30, - usefulness: 0, - }, - ], - }, - ]; - - const result = formatChatResponse(messages, message); - - const expected = [ - ...messages.slice(0, 5), - ...messages.slice(6), - { role: message.role, content: message.content }, - ]; - - expect(result).toEqual(expected); - }); - - test("it should put plain text before a user message at the end of the array", () => { - const userMessage: UserMessage = { - role: "user", - content: "Hello", - }; - - const sentMessages = [userMessage]; - - const updatedUserMessage: UserMessage = { - role: "user", - content: "hi", - }; - - const userMessageResponse: UserMessageResponse = { - ...updatedUserMessage, - id: "user message", - }; - - const plainTextMessage: PlainTextMessage = { - role: "plain_text", - content: "test", - }; - - const plainTextResponse: PlainTextResponse = { - ...plainTextMessage, - tool_call_id: "toolCallId", - }; - - const response = [plainTextResponse, userMessageResponse]; - - const result = response.reduce<ChatMessages>((messages, message) => { - return formatChatResponse(messages, message); - }, sentMessages); - - const expected = [plainTextMessage, updatedUserMessage]; - - expect(result).toEqual(expected); - }); - - test("price with message", () => { - const chunks: ChatResponse[] = [ - { - id: "", - role: "user", - content: "hello\n", - checkpoints: [ - { - workspace_folder: "/refact", - commit_hash: "6710babc75beb5198be8a7a2b4ba6c095afa2158", - }, - ], - compression_strength: "absent", - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: "Hello", - role: "assistant", - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: "!", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " How", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " can", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " I", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " assist", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " you", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: 
"chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " with", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " your", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " project", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " today", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: "?", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: "stop", - index: 0, - delta: { - content: null, - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: null, - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: null, - role: null, - tool_calls: null, - }, - }, - ], - usage: { - completion_tokens: 14, - prompt_tokens: 2818, - total_tokens: 2832, - completion_tokens_details: { - accepted_prediction_tokens: 0, - audio_tokens: 0, - reasoning_tokens: 0, - rejected_prediction_tokens: 0, - }, - prompt_tokens_details: { audio_tokens: 0, cached_tokens: 0 }, - }, - }, - { - id: "chatcmpl-d103cc09-5306-43d3-9fb3-609e5e61948a", - created: 1746094949.359174, - model: "gpt-4.1", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: null, - role: null, - tool_calls: null, - }, - }, - ], - usage: { - completion_tokens: 14, - prompt_tokens: 2818, - total_tokens: 2832, - completion_tokens_details: { - accepted_prediction_tokens: 0, - audio_tokens: 0, - reasoning_tokens: 0, - rejected_prediction_tokens: 0, - }, - prompt_tokens_details: { audio_tokens: 0, cached_tokens: 0 }, - }, - metering_coins_prompt: 5.636, - metering_coins_generated: 0.112, - metering_coins_cache_creation: 0.0, - metering_coins_cache_read: 0.0, - metering_prompt_tokens_n: 2818, - metering_generated_tokens_n: 14, - metering_cache_creation_tokens_n: 0, - metering_cache_read_tokens_n: 0, - metering_balance: 1085, - refact_agent_request_available: null, - refact_agent_max_request_num: 40, - }, - { - id: "", - choices: [ - { - index: 0, - delta: { role: "assistant", content: "", tool_calls: null }, - finish_reason: "stop", - }, - ], - created: 1746094949.359174, - model: "gpt-4.1", - }, - ]; - - const result = chunks.reduce<ChatMessages>((acc, cur) => { - return formatChatResponse(acc, cur); - }, []); - - expect(result).toEqual([ - { - checkpoints: [ - { - commit_hash: 
"6710babc75beb5198be8a7a2b4ba6c095afa2158", - workspace_folder: "/refact", - }, - ], - compression_strength: "absent", - content: "hello\n", - role: "user", - }, - { - content: "Hello! How can I assist you with your project today?", - finish_reason: "stop", - metering_balance: 1085, - metering_cache_creation_tokens_n: 0, - metering_cache_read_tokens_n: 0, - metering_coins_cache_creation: 0, - metering_coins_cache_read: 0, - metering_coins_generated: 0.112, - metering_coins_prompt: 5.636, - metering_prompt_tokens_n: 2818, - metering_generated_tokens_n: 14, - reasoning_content: "", - role: "assistant", - thinking_blocks: undefined, - tool_calls: undefined, - usage: { - completion_tokens: 14, - completion_tokens_details: { - accepted_prediction_tokens: 0, - audio_tokens: 0, - reasoning_tokens: 0, - rejected_prediction_tokens: 0, - }, - prompt_tokens: 2818, - prompt_tokens_details: { - audio_tokens: 0, - cached_tokens: 0, - }, - total_tokens: 2832, - }, - }, - ]); - }); - - test("byok usage", () => { - const chunks: ChatResponse[] = [ - { - id: "", - role: "user", - content: "call tree and then do nothing\n", - checkpoints: [ - { - workspace_folder: "/someplace", - commit_hash: "d7fd24f70133348f01a80f6f9a54628e2ee56777", - }, - ], - compression_strength: "absent", - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: "I'll call", - role: "assistant", - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " the `tree` function to show the project structure", - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: " and then do nothing else as requested.", - role: null, - - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: "", - role: "assistant", - - tool_calls: [ - { - id: "toolu_01SZSQHfY6jRi4TSd9HTRy6e", - function: { - arguments: "", - name: "tree", - }, - type: "function", - index: 0, - }, - ], - }, - }, - ], - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: "", - role: "assistant", - - tool_calls: [ - // odd that some of these are null? 
- // { - // id: null, - // function: { - // arguments: "", - // name: null, - // }, - // type: "function", - // index: 0, - // }, - ], - }, - }, - ], - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: "", - role: "assistant", - - tool_calls: [ - // { - // id: null, - // function: { - // arguments: "{}", - // name: null, - // }, - // type: "function", - // index: 0, - // }, - ], - }, - }, - ], - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - - choices: [ - { - finish_reason: "tool_calls", - index: 0, - delta: { - content: null, - role: null, - tool_calls: null, - }, - }, - ], - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: null, - role: null, - - tool_calls: null, - }, - }, - ], - - usage: { - completion_tokens: 56, - prompt_tokens: 3, - total_tokens: 59, - completion_tokens_details: { - accepted_prediction_tokens: null, - audio_tokens: null, - reasoning_tokens: 0, - rejected_prediction_tokens: null, - }, - prompt_tokens_details: { - audio_tokens: null, - cached_tokens: 0, - }, - cache_creation_input_tokens: 9170, - cache_read_input_tokens: 0, - }, - }, - { - id: "chatcmpl-db1e8dbd-5170-4a35-bc62-ae5aa6f46fa4", - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - - choices: [ - { - finish_reason: null, - index: 0, - delta: { - content: null, - role: null, - tool_calls: null, - }, - }, - ], - usage: { - completion_tokens: 56, - prompt_tokens: 3, - total_tokens: 59, - completion_tokens_details: { - accepted_prediction_tokens: null, - audio_tokens: null, - reasoning_tokens: 0, - rejected_prediction_tokens: null, - }, - prompt_tokens_details: { - audio_tokens: null, - cached_tokens: 0, - }, - cache_creation_input_tokens: 9170, - cache_read_input_tokens: 0, - }, - metering_coins_prompt: 0.009, - metering_coins_generated: 0.84, - metering_coins_cache_creation: 34.3875, - metering_coins_cache_read: 0.0, - metering_prompt_tokens_n: 3, - metering_generated_tokens_n: 56, - metering_cache_creation_tokens_n: 9170, - metering_cache_read_tokens_n: 0, - metering_balance: 952433, - refact_agent_request_available: null, - refact_agent_max_request_num: 400, - }, - { - id: "", - choices: [ - { - index: 0, - delta: { - role: "assistant", - content: "", - tool_calls: null, - }, - finish_reason: "stop", - }, - ], - created: 1746115727.9020996, - model: "claude-3-7-sonnet", - }, - ]; - - const results = chunks.reduce<ChatMessages>( - (acc, cur) => formatChatResponse(acc, cur), - [], - ); - - expect(results).toEqual([ - { - checkpoints: [ - { - commit_hash: "d7fd24f70133348f01a80f6f9a54628e2ee56777", - workspace_folder: "/someplace", - }, - ], - compression_strength: "absent", - content: "call tree and then do nothing\n", - role: "user", - }, - { - content: - "I'll call the `tree` function to show the project structure and then do nothing else as requested.", - finish_reason: "stop", - metering_balance: 952433, - metering_cache_creation_tokens_n: 9170, - metering_cache_read_tokens_n: 0, - metering_coins_cache_creation: 34.3875, - metering_coins_cache_read: 0, - metering_coins_generated: 0.84, - metering_coins_prompt: 0.009, - metering_prompt_tokens_n: 3, - metering_generated_tokens_n: 56, - reasoning_content: "", - role: 
"assistant", - thinking_blocks: undefined, - tool_calls: [ - { - function: { - arguments: "", - name: "tree", - }, - id: "toolu_01SZSQHfY6jRi4TSd9HTRy6e", - index: 0, - type: "function", - }, - ], - usage: { - cache_creation_input_tokens: 9170, - cache_read_input_tokens: 0, - completion_tokens: 56, - completion_tokens_details: { - accepted_prediction_tokens: null, - audio_tokens: null, - reasoning_tokens: 0, - rejected_prediction_tokens: null, - }, - prompt_tokens: 3, - prompt_tokens_details: { - audio_tokens: null, - cached_tokens: 0, - }, - total_tokens: 59, - }, - }, - ]); - }); - - test("byok short usage", () => { - const chunks: ChatResponse[] = [ - { - id: "", - role: "user", - content: "please tell me a joke, don't call any tools\n", - checkpoints: [ - { - workspace_folder: - "/home/andrii-lashchov/Desktop/work/refact/refact-agent/engine", - commit_hash: "b71c8387f951b81a1b9cd388f3d46c94eb302ebe", - }, - ], - compression_strength: "absent", - }, - { - id: "msg_01SrL8iCZWJGWhYF2obVNXeV", - choices: [ - { - index: 0, - delta: { - role: "assistant", - }, - }, - ], - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - }, - { - id: "msg_01SrL8iCZWJGWhYF2obVNXeV", - choices: [ - { - index: 0, - delta: { - content: "I'", - }, - }, - ], - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - }, - { - id: "msg_01SrL8iCZWJGWhYF2obVNXeV", - choices: [ - { - index: 0, - delta: { - content: "d tell you a joke about UDP, but you", - }, - }, - ], - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - }, - { - id: "msg_01SrL8iCZWJGWhYF2obVNXeV", - choices: [ - { - index: 0, - delta: { - content: " might not get it.\n\nWait", - }, - }, - ], - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - }, - { - id: "msg_01SrL8iCZWJGWhYF2obVNXeV", - choices: [ - { - index: 0, - delta: { - content: ", here's another one:", - }, - }, - ], - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - }, - { - id: "msg_01SrL8iCZWJGWhYF2obVNXeV", - choices: [ - { - index: 0, - delta: { - content: " Why do programmers prefer dark mode?", - }, - }, - ], - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - }, - { - id: "msg_01SrL8iCZWJGWhYF2obVNXeV", - choices: [ - { - index: 0, - delta: { - content: " Because light attracts bugs!", - }, - }, - ], - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - }, - { - id: "msg_01SrL8iCZWJGWhYF2obVNXeV", - choices: [ - { - index: 0, - delta: {}, - finish_reason: "stop", - }, - ], - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - usage: { - completion_tokens: 41, - prompt_tokens: 9359, - total_tokens: 9400, - }, - }, - { - id: "", - choices: [ - { - index: 0, - delta: { - role: "assistant", - content: "", - tool_calls: null, - }, - finish_reason: "stop", - }, - ], - - created: 1746117659.9634643, - model: "claude-3-7-sonnet-latest", - }, - ]; - - const result = chunks.reduce<ChatMessages>( - (messages, chunk) => formatChatResponse(messages, chunk), - [], - ); - - expect(result).toEqual([ - { - checkpoints: [ - { - commit_hash: "b71c8387f951b81a1b9cd388f3d46c94eb302ebe", - workspace_folder: - "/home/andrii-lashchov/Desktop/work/refact/refact-agent/engine", - }, - ], - compression_strength: "absent", - content: "please tell me a joke, don't call any tools\n", - role: "user", - }, - { - content: - "I'd tell you a joke about UDP, but you might not get it.\n\nWait, here's another one: Why do programmers prefer dark mode? 
Because light attracts bugs!", - finish_reason: "stop", - metering_balance: undefined, - metering_cache_creation_tokens_n: undefined, - metering_cache_read_tokens_n: undefined, - metering_coins_cache_creation: undefined, - metering_coins_cache_read: undefined, - metering_coins_generated: undefined, - metering_coins_prompt: undefined, - metering_prompt_tokens_n: undefined, - reasoning_content: "", - role: "assistant", - thinking_blocks: undefined, - tool_calls: undefined, - usage: { - completion_tokens: 41, - prompt_tokens: 9359, - total_tokens: 9400, - }, - }, - ]); - }); - - test("gemini", () => { - const chunks: ChatResponse[] = [ - { - id: "", - role: "user", - content: "call tree\n", - checkpoints: [ - { - workspace_folder: "/emergency_frog_situation", - commit_hash: "9592d97a746d392d180491bd5a44339d83f1c19c", - }, - ], - compression_strength: "absent", - }, - { - choices: [ - { - delta: { - content: "Okay, I will", - role: "assistant", - }, - index: 0, - }, - ], - created: 1746186404.4522197, - model: "gemini-2.5-pro-exp-03-25", - id: "", - usage: { - completion_tokens: 4, - prompt_tokens: 3547, - total_tokens: 3577, - }, - }, - { - choices: [ - { - delta: { - content: " call the `tree()` tool to show the project structure.", - role: "assistant", - }, - index: 0, - }, - ], - created: 1746186404.4522197, - model: "gemini-2.5-pro-exp-03-25", - id: "", - usage: { - completion_tokens: 16, - prompt_tokens: 3547, - total_tokens: 3601, - }, - }, - { - choices: [ - { - delta: { - role: "assistant", - tool_calls: [ - { - function: { - arguments: "{}", - name: "tree", - }, - id: "call_247e2a7b080d44fe83a655fd18d17277", - type: "function", - index: 0, - }, - ], - }, - finish_reason: "tool_calls", - index: 0, - }, - ], - created: 1746186404.4522197, - model: "gemini-2.5-pro-exp-03-25", - usage: { - completion_tokens: 24, - prompt_tokens: 3547, - total_tokens: 3604, - }, - }, - { - choices: [ - { - index: 0, - delta: { - role: "assistant", - content: "", - tool_calls: null, - }, - finish_reason: "stop", - }, - ], - created: 1746186404.4522197, - model: "gemini-2.5-pro-exp-03-25", - }, - ]; - - const result = chunks.reduce<ChatMessages>( - (acc, cur) => formatChatResponse(acc, cur), - [], - ); - - expect(result).toEqual([ - { - checkpoints: [ - { - commit_hash: "9592d97a746d392d180491bd5a44339d83f1c19c", - workspace_folder: "/emergency_frog_situation", - }, - ], - compression_strength: "absent", - content: "call tree\n", - role: "user", - }, - { - content: - "Okay, I will call the `tree()` tool to show the project structure.", - finish_reason: "stop", - metering_balance: undefined, - metering_cache_creation_tokens_n: undefined, - metering_cache_read_tokens_n: undefined, - metering_coins_cache_creation: undefined, - metering_coins_cache_read: undefined, - metering_coins_generated: undefined, - metering_coins_prompt: undefined, - metering_prompt_tokens_n: undefined, - reasoning_content: "", - role: "assistant", - thinking_blocks: undefined, - tool_calls: [ - { - function: { - arguments: "{}", - name: "tree", - }, - id: "call_247e2a7b080d44fe83a655fd18d17277", - index: 0, - type: "function", - }, - ], - usage: { - completion_tokens: 24, - prompt_tokens: 3547, - total_tokens: 3604, - }, - }, - ]); - }); - - test("byok openai usage", () => { - const chunks: ChatResponse[] = [ - { - id: "", - role: "user", - content: "hello\n", - checkpoints: [ - { - workspace_folder: "/Users/marc/Projects/refact", - commit_hash: "5365c0e1efde9a8a4b9be199ea8cd47e4cc5acfd", - }, - ], - compression_strength: "absent", - }, 
- { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - role: "assistant", - content: "", - // refusal: null, - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: "Hello", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: "!", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " I'm", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " Ref", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: "act", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " Agent", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: ",", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " your", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " coding", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - 
{ - index: 0, - delta: { - content: " assistant", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: ".", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " How", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " can", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " I", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " help", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " you", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: " today", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: { - content: "?", - }, - finish_reason: null, - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [ - { - index: 0, - delta: {}, - finish_reason: "stop", - }, - ], - usage: null, - }, - { - id: "chatcmpl-BUBWQDOHxOWUxzDW2DxvUR462yMpT", - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - // service_tier: "default", - // system_fingerprint: "fp_8810992130", - choices: [], - usage: { - prompt_tokens: 2876, - completion_tokens: 222, - total_tokens: 3098, - prompt_tokens_details: { - cached_tokens: 2688, - audio_tokens: 0, - }, - completion_tokens_details: { - reasoning_tokens: 192, - audio_tokens: 0, - accepted_prediction_tokens: 0, - 
rejected_prediction_tokens: 0, - }, - }, - }, - { - choices: [ - { - index: 0, - delta: { - role: "assistant", - content: "", - tool_calls: null, - }, - finish_reason: "stop", - }, - ], - // object: "chat.completion.chunk", - created: 1746533829.888066, - model: "o3-mini", - }, - ]; - - const result = chunks.reduce<ChatMessages>( - (acc, cur) => formatChatResponse(acc, cur), - [], - ); - - expect(result).toEqual([ - { - checkpoints: [ - { - commit_hash: "5365c0e1efde9a8a4b9be199ea8cd47e4cc5acfd", - workspace_folder: "/Users/marc/Projects/refact", - }, - ], - compression_strength: "absent", - content: "hello\n", - role: "user", - }, - { - content: - "Hello! I'm Refact Agent, your coding assistant. How can I help you today?", - finish_reason: "stop", - metering_balance: undefined, - metering_cache_creation_tokens_n: undefined, - metering_cache_read_tokens_n: undefined, - metering_coins_cache_creation: undefined, - metering_coins_cache_read: undefined, - metering_coins_generated: undefined, - metering_coins_prompt: undefined, - metering_prompt_tokens_n: undefined, - reasoning_content: "", - role: "assistant", - thinking_blocks: undefined, - tool_calls: undefined, - usage: { - prompt_tokens: 2876, - completion_tokens: 222, - total_tokens: 3098, - prompt_tokens_details: { - cached_tokens: 2688, - audio_tokens: 0, - }, - completion_tokens_details: { - reasoning_tokens: 192, - audio_tokens: 0, - accepted_prediction_tokens: 0, - rejected_prediction_tokens: 0, - }, - }, - }, - ]); - }); -}); +import { describe, expect, test } from "vitest"; +import { ChatMessages, type ToolCall } from "../../../services/refact"; +import { mergeToolCalls, postProcessMessagesAfterStreaming } from "./utils"; describe("mergeToolCalls", () => { test("combines two tool calls", () => { @@ -1678,65 +42,6 @@ describe("mergeToolCalls", () => { }); }); -function stringToUint8Array(str: string): Uint8Array { - const encoder = new TextEncoder(); - return encoder.encode(str); -} - -describe("consumeStream", () => { - test("it should handle split packets", async () => { - const packet1 = stringToUint8Array('data: {"key": "test"}\n\n'); - const packet2 = stringToUint8Array('data: {"key":'); - const packet3 = stringToUint8Array('"value"}\n\n'); - - const reader = new ReadableStream<Uint8Array>({ - start(controller) { - controller.enqueue(packet1); - controller.enqueue(packet2); - controller.enqueue(packet3); - controller.close(); - }, - }).getReader(); - - const onAbort = vi.fn(); - const onChunk = vi.fn(); - const abort = new AbortController(); - - await consumeStream(reader, abort.signal, onAbort, onChunk); - - expect(onAbort).not.toBeCalled(); - expect(onChunk).toBeCalledWith({ key: "test" }); - expect(onChunk).toBeCalledWith({ key: "value" }); - }); - - test("it only splits at \\n\\n", async () => { - const packet1 = stringToUint8Array( - 'data: {"content":"```py\\nprint(\\"hello\\")\\n\\n', - ); - const packet2 = stringToUint8Array('```\\n"}\n\n'); - - const reader = new ReadableStream<Uint8Array>({ - start(controller) { - controller.enqueue(packet1); - controller.enqueue(packet2); - controller.close(); - }, - }).getReader(); - - const onAbort = vi.fn(); - const onChunk = vi.fn(); - const abort = new AbortController(); - - await consumeStream(reader, abort.signal, onAbort, onChunk); - - expect(onAbort).not.toBeCalled(); - - expect(onChunk).toHaveBeenCalledWith({ - content: '```py\nprint("hello")\n\n```\n', - }); - }); -}); - describe("postProcessMessagesAfterStreaming", () => { test("should filter out server-executed tool 
calls and store in server_executed_tools", () => { const messages: ChatMessages = [ diff --git a/refact-agent/gui/src/features/Chat/Thread/utils.ts b/refact-agent/gui/src/features/Chat/Thread/utils.ts index a5ec4012c..8eb0002eb 100644 --- a/refact-agent/gui/src/features/Chat/Thread/utils.ts +++ b/refact-agent/gui/src/features/Chat/Thread/utils.ts @@ -2,67 +2,23 @@ import { AssistantMessage, ChatContextFile, ChatContextFileMessage, - ChatMessage, ChatMessages, - ChatResponse, - DiffChunk, - SubchatResponse, ToolCall, ToolMessage, - ToolResult, UserMessage, - WebSearchCitation, - isAssistantDelta, isAssistantMessage, - isCDInstructionResponse, - isChatContextFileDelta, - isChatResponseChoice, - isContextFileResponse, isDiffChunk, isDiffMessage, - isDiffResponse, isLspUserMessage, - isPlainTextResponse, - isSubchatContextFileResponse, - isSubchatResponse, - isSystemResponse, - isToolCallDelta, - isThinkingBlocksDelta, isToolContent, isToolMessage, - isToolResponse, isUserMessage, - isUserResponse, ThinkingBlock, - isToolCallMessage, - Usage, } from "../../../services/refact"; import { v4 as uuidv4 } from "uuid"; import { parseOrElse } from "../../../utils"; import { type LspChatMessage } from "../../../services/refact"; -import { checkForDetailMessage, isServerExecutedTool } from "./types"; - -function extractCitationFromDelta( - delta: unknown, -): WebSearchCitation | undefined { - if (!delta || typeof delta !== "object") return undefined; - const d = delta as Record<string, unknown>; - const psf = d.provider_specific_fields; - if (!psf || typeof psf !== "object") return undefined; - const psfObj = psf as Record<string, unknown>; - const citation = psfObj.citation; - if (!citation || typeof citation !== "object") return undefined; - const c = citation as Record<string, unknown>; - // Validate it's a web search citation - if ( - c.type === "web_search_result_location" && - typeof c.url === "string" && - typeof c.title === "string" - ) { - return citation as WebSearchCitation; - } - return undefined; -} +import { isServerExecutedTool } from "./types"; export function postProcessMessagesAfterStreaming( messages: ChatMessages, @@ -127,33 +83,6 @@ function deduplicateToolCalls(toolCalls: ToolCall[]): ToolCall[] { return Array.from(toolCallMap.values()); } -// export const TAKE_NOTE_MESSAGE = [ -// 'How many times user has corrected or directed you? Write "Number of correction points N".', -// 'Then start each one with "---\n", describe what you (the assistant) did wrong, write "Mistake: ..."', -// 'Write documentation to tools or the project in general that will help you next time, describe in detail how tools work, or what the project consists of, write "Documentation: ..."', -// "A good documentation for a tool describes what is it for, how it helps to answer user's question, what applicability criteia were discovered, what parameters work and how it will help the user.", -// "A good documentation for a project describes what folders, files are there, summarization of each file, classes. Start documentation for the project with project name.", -// "After describing all points, call note_to_self() in parallel for each actionable point, generate keywords that should include the relevant tools, specific files, dirs, and put documentation-like paragraphs into text.", -// ].join("\n"); - -// export const TAKE_NOTE_MESSAGE = [ -// "How many times user has corrected you about tool usage? 
Call note_to_self() with this exact format:", -// "", -// "CORRECTION_POINTS: N", -// "", -// "POINT1 WHAT_I_DID_WRONG: i should have used ... tool call or method or plan ... instead of this tool call or method or plan", -// "POINT1 WAS_I_SUCCESSFUL_AFTER_CORRECTION: YES/NO", -// "POINT1 FOR_FUTURE_FEREFENCE: when ... [describe situation when it's applicable] use ... tool call or method or plan", -// "POINT1 HOW_NEW_IS_THIS_NOTE: 0-5", -// "POINT1 HOW_INSIGHTFUL_IS_THIS_NOTE: 0-5", -// "", -// "POINT2 WHAT_I_DID_WRONG: ...", -// "POINT2 WAS_I_SUCCESSFUL_AFTER_CORRECTION: ...", -// "POINT2 FOR_FUTURE_FEREFENCE: ...", -// "POINT2 HOW_NEW_IS_THIS_NOTE: ...", -// "POINT2 HOW_INSIGHTFUL_IS_THIS_NOTE: ...", -// ].join("\n"); - export const TAKE_NOTE_MESSAGE = `How many times did you used a tool incorrectly, so it didn't produce the indented result? Call remember_how_to_use_tools() with this exact format: CORRECTION_POINTS: N @@ -238,494 +167,6 @@ export function lastIndexOf<T>(arr: T[], predicate: (a: T) => boolean): number { return index; } -function replaceLastUserMessage( - messages: ChatMessages, - userMessage: UserMessage, -): ChatMessages { - if (messages.length === 0) { - return [userMessage]; - } - const lastUserMessageIndex = lastIndexOf<ChatMessage>( - messages, - isUserMessage, - ); - - const result = messages.filter((_, index) => index !== lastUserMessageIndex); - - return result.concat([userMessage]); -} - -function takeHighestUsage( - a?: Usage | null, - b?: Usage | null, -): Usage | undefined { - if (a == null) return b ?? undefined; - if (b == null) return a; - return a.total_tokens > b.total_tokens ? a : b; -} - -type MeteringBalance = Pick< - AssistantMessage, - | "metering_balance" - | "metering_cache_creation_tokens_n" - | "metering_cache_read_tokens_n" - | "metering_prompt_tokens_n" - | "metering_generated_tokens_n" - | "metering_coins_prompt" - | "metering_coins_generated" - | "metering_coins_cache_creation" - | "metering_coins_cache_read" ->; - -function lowestNumber(a?: number, b?: number): number | undefined { - if (a === undefined) return b; - if (b === undefined) return a; - return Math.min(a, b); -} -function highestNumber(a?: number, b?: number): number | undefined { - if (a === undefined) return b; - if (b === undefined) return a; - return Math.max(a, b); -} -function mergeMetering( - a: MeteringBalance, - b: MeteringBalance, -): MeteringBalance { - return { - metering_balance: lowestNumber(a.metering_balance, b.metering_balance), - metering_cache_creation_tokens_n: highestNumber( - a.metering_cache_creation_tokens_n, - b.metering_cache_creation_tokens_n, - ), - metering_cache_read_tokens_n: highestNumber( - a.metering_cache_read_tokens_n, - b.metering_cache_read_tokens_n, - ), - metering_prompt_tokens_n: highestNumber( - a.metering_prompt_tokens_n, - b.metering_prompt_tokens_n, - ), - metering_generated_tokens_n: highestNumber( - a.metering_generated_tokens_n, - b.metering_generated_tokens_n, - ), - metering_coins_prompt: highestNumber( - a.metering_coins_prompt, - b.metering_coins_prompt, - ), - metering_coins_generated: highestNumber( - a.metering_coins_generated, - b.metering_coins_generated, - ), - metering_coins_cache_read: highestNumber( - a.metering_coins_cache_read, - b.metering_coins_cache_read, - ), - metering_coins_cache_creation: highestNumber( - a.metering_coins_cache_creation, - b.metering_coins_cache_creation, - ), - }; -} - -export function formatChatResponse( - messages: ChatMessages, - response: ChatResponse, -): ChatMessages { - if 
(isUserResponse(response)) { - return replaceLastUserMessage(messages, { - role: response.role, - content: response.content, - checkpoints: response.checkpoints, - compression_strength: response.compression_strength, - }); - } - - if (isContextFileResponse(response)) { - const content = parseOrElse<ChatContextFile[]>(response.content, []); - return [...messages, { role: response.role, content }]; - } - - if (isSubchatResponse(response)) { - return handleSubchatResponse(messages, response); - } - - if (isToolResponse(response)) { - const { - tool_call_id, - content, - tool_failed, - finish_reason, - compression_strength, - } = response; - const filteredMessages = finishToolCallInMessages(messages, tool_call_id); - const toolResult: ToolResult = - typeof content === "string" - ? { - tool_call_id, - content, - finish_reason, - compression_strength, - tool_failed, - } - : { - tool_call_id, - content, - finish_reason, - compression_strength, - tool_failed, - }; - - return [...filteredMessages, { role: response.role, content: toolResult }]; - } - - if (isDiffResponse(response)) { - const content = parseOrElse<DiffChunk[]>(response.content, []); - return [ - ...messages, - { role: response.role, content, tool_call_id: response.tool_call_id }, - ]; - } - - if (isPlainTextResponse(response)) { - return [...messages, { role: response.role, content: response.content }]; - } - - if (isCDInstructionResponse(response)) { - return [...messages, { role: response.role, content: response.content }]; - } - - // system messages go to the front - if (isSystemResponse(response)) { - return [{ role: response.role, content: response.content }, ...messages]; - } - - if (!isChatResponseChoice(response)) { - // console.log("Not a good response"); - // console.log(response); - return messages; - } - - const maybeLastMessage = messages[messages.length - 1]; - - if ( - response.choices.length === 0 && - response.usage && - isAssistantMessage(maybeLastMessage) - ) { - const msg: AssistantMessage = { - ...maybeLastMessage, - usage: response.usage, - ...mergeMetering(maybeLastMessage, response), - }; - return messages.slice(0, -1).concat(msg); - } - - return response.choices.reduce<ChatMessages>((acc, cur) => { - if (isChatContextFileDelta(cur.delta)) { - const msg = { role: cur.delta.role, content: cur.delta.content }; - return acc.concat([msg]); - } - - if ( - acc.length === 0 && - "content" in cur.delta && - typeof cur.delta.content === "string" && - cur.delta.role - ) { - const newCitation = extractCitationFromDelta(cur.delta); - const citations = newCitation ? [newCitation] : undefined; - const msg: AssistantMessage = { - role: cur.delta.role, - content: cur.delta.content, - reasoning_content: cur.delta.reasoning_content, - tool_calls: cur.delta.tool_calls, - thinking_blocks: cur.delta.thinking_blocks, - citations: citations, - finish_reason: cur.finish_reason, - usage: response.usage, - ...mergeMetering({}, response), - }; - return acc.concat([msg]); - } - - const lastMessage = acc[acc.length - 1]; - - if (isToolCallDelta(cur.delta)) { - // Extract citation if present in this chunk - const deltaCitation = extractCitationFromDelta(cur.delta); - - if (!isAssistantMessage(lastMessage)) { - return acc.concat([ - { - role: "assistant", - content: "", // should be like that? - tool_calls: cur.delta.tool_calls, - citations: deltaCitation ? [deltaCitation] : undefined, - finish_reason: cur.finish_reason, - }, - ]); - } - - const last = acc.slice(0, -1); - const collectedCalls = lastMessage.tool_calls ?? 
[]; - const tool_calls = mergeToolCalls(collectedCalls, cur.delta.tool_calls); - const citations = deltaCitation - ? [...(lastMessage.citations ?? []), deltaCitation] - : lastMessage.citations; - - return last.concat([ - { - role: "assistant", - content: lastMessage.content ?? "", - reasoning_content: lastMessage.reasoning_content ?? "", - tool_calls: tool_calls, - thinking_blocks: lastMessage.thinking_blocks, - citations: citations, - finish_reason: cur.finish_reason, - usage: takeHighestUsage(lastMessage.usage, response.usage), - ...mergeMetering(lastMessage, response), - }, - ]); - } - - if (isThinkingBlocksDelta(cur.delta)) { - // Extract citation if present in this chunk - const deltaCitation = extractCitationFromDelta(cur.delta); - - if (!isAssistantMessage(lastMessage)) { - return acc.concat([ - { - role: "assistant", - content: "", // should it be like this? - thinking_blocks: cur.delta.thinking_blocks, - reasoning_content: cur.delta.reasoning_content, - citations: deltaCitation ? [deltaCitation] : undefined, - finish_reason: cur.finish_reason, - }, - ]); - } - - const last = acc.slice(0, -1); - const collectedThinkingBlocks = lastMessage.thinking_blocks ?? []; - const thinking_blocks = mergeThinkingBlocks( - collectedThinkingBlocks, - cur.delta.thinking_blocks ?? [], - ); - const citations = deltaCitation - ? [...(lastMessage.citations ?? []), deltaCitation] - : lastMessage.citations; - - return last.concat([ - { - role: "assistant", - content: lastMessage.content ?? "", - reasoning_content: - (lastMessage.reasoning_content ?? "") + cur.delta.reasoning_content, - tool_calls: lastMessage.tool_calls, - thinking_blocks: thinking_blocks, - citations: citations, - finish_reason: cur.finish_reason, - usage: takeHighestUsage(lastMessage.usage, response.usage), - ...mergeMetering(lastMessage, response), - }, - ]); - } - - if ( - isAssistantMessage(lastMessage) && - isAssistantDelta(cur.delta) && - typeof cur.delta.content === "string" - ) { - const last = acc.slice(0, -1); - // Extract citation from provider_specific_fields if present - const newCitation = extractCitationFromDelta(cur.delta); - const citations = newCitation - ? [...(lastMessage.citations ?? []), newCitation] - : lastMessage.citations; - return last.concat([ - { - role: "assistant", - content: (lastMessage.content ?? "") + cur.delta.content, - reasoning_content: - (lastMessage.reasoning_content ?? "") + - (cur.delta.reasoning_content ?? ""), - tool_calls: lastMessage.tool_calls, - thinking_blocks: lastMessage.thinking_blocks, - citations: citations, - finish_reason: cur.finish_reason, - usage: takeHighestUsage(lastMessage.usage, response.usage), - ...mergeMetering(lastMessage, response), - }, - ]); - } else if ( - isAssistantDelta(cur.delta) && - typeof cur.delta.content === "string" - ) { - const newCitation = extractCitationFromDelta(cur.delta); - const citations = newCitation ? [newCitation] : undefined; - return acc.concat([ - { - role: "assistant", - content: cur.delta.content, - reasoning_content: cur.delta.reasoning_content, - thinking_blocks: cur.delta.thinking_blocks, - citations: citations, - finish_reason: cur.finish_reason, - // usage: currentUsage, // here? - usage: response.usage, - ...mergeMetering({}, response), - }, - ]); - } else if (cur.delta.role === "assistant") { - // empty message from JB - // maybe here? 
- return acc; - } - - if (cur.delta.role === null || cur.finish_reason !== null) { - // NOTE: deepseek for some reason doesn't send role in all deltas - // If cur.delta.role === 'assistant' || cur.delta.role === null, then if last message's role is not assistant, then creating a new assistant message - // TODO: if cur.delta.role === 'assistant', then taking out from cur.delta all possible fields and values, attaching to current assistant message, sending back this one - if (!isAssistantMessage(lastMessage) && isAssistantDelta(cur.delta)) { - const newCitation = extractCitationFromDelta(cur.delta); - const citations = newCitation ? [newCitation] : undefined; - return acc.concat([ - { - role: "assistant", - content: cur.delta.content ?? "", - reasoning_content: cur.delta.reasoning_content, - tool_calls: cur.delta.tool_calls, - thinking_blocks: cur.delta.thinking_blocks, - citations: citations, - finish_reason: cur.finish_reason, - usage: response.usage, - ...mergeMetering({}, response), - }, - ]); - } - - const last = acc.slice(0, -1); - if ( - (isAssistantMessage(lastMessage) || isToolCallMessage(lastMessage)) && - isAssistantDelta(cur.delta) - ) { - const newCitation = extractCitationFromDelta(cur.delta); - const citations = newCitation - ? [...(lastMessage.citations ?? []), newCitation] - : lastMessage.citations; - return last.concat([ - { - role: "assistant", - content: (lastMessage.content ?? "") + (cur.delta.content ?? ""), - reasoning_content: - (lastMessage.reasoning_content ?? "") + - (cur.delta.reasoning_content ?? ""), - tool_calls: lastMessage.tool_calls, - thinking_blocks: lastMessage.thinking_blocks, - citations: citations, - finish_reason: cur.finish_reason, - usage: takeHighestUsage(lastMessage.usage, response.usage), - ...mergeMetering(lastMessage, response), - }, - ]); - } - - if (isAssistantMessage(lastMessage) && response.usage) { - return last.concat([ - { - ...lastMessage, - usage: takeHighestUsage(lastMessage.usage, response.usage), - ...mergeMetering(lastMessage, response), - }, - ]); - } - } - - // console.log("Fall though"); - // console.log({ cur, lastMessage }); - - return acc; - }, messages); -} - -function handleSubchatResponse( - messages: ChatMessages, - response: SubchatResponse, -): ChatMessages { - function iter( - msgs: ChatMessages, - resp: SubchatResponse, - accumulator: ChatMessages = [], - ) { - if (msgs.length === 0) return accumulator; - - const [head, ...tail] = msgs; - - if (!isAssistantMessage(head) || !head.tool_calls) { - return iter(tail, response, accumulator.concat(head)); - } - - const maybeToolCall = head.tool_calls.find( - (toolCall) => toolCall.id === resp.tool_call_id, - ); - - if (!maybeToolCall) return iter(tail, response, accumulator.concat(head)); - - const addMessageFiles = isSubchatContextFileResponse(resp.add_message) - ? parseOrElse<ChatContextFile[]>(resp.add_message.content, []).map( - (file) => file.file_name, - ) - : []; - - const attachedFiles = maybeToolCall.attached_files - ? 
[...maybeToolCall.attached_files, ...addMessageFiles] - : addMessageFiles; - - const toolCallWithCubChat: ToolCall = { - ...maybeToolCall, - subchat: response.subchat_id, - attached_files: attachedFiles, - }; - - const toolCalls = head.tool_calls.map((toolCall) => { - if (toolCall.id === toolCallWithCubChat.id) return toolCallWithCubChat; - return toolCall; - }); - - const message: AssistantMessage = { - ...head, - tool_calls: toolCalls, - }; - - const nextAccumulator = [...accumulator, message]; - return iter(tail, response, nextAccumulator); - } - - return iter(messages, response); -} - -function finishToolCallInMessages( - messages: ChatMessages, - toolCallId: string, -): ChatMessages { - return messages.map((message) => { - if (!isAssistantMessage(message)) { - return message; - } - if (!message.tool_calls) { - return message; - } - const tool_calls = message.tool_calls.map((toolCall) => { - if (toolCall.id !== toolCallId) { - return toolCall; - } - return { ...toolCall, attached_files: undefined, subchat: undefined }; - }); - return { ...message, tool_calls }; - }); -} - export function formatMessagesForLsp(messages: ChatMessages): LspChatMessage[] { return messages.reduce<LspChatMessage[]>((acc, message) => { if (isUserMessage(message)) { @@ -749,8 +190,8 @@ export function formatMessagesForLsp(messages: ChatMessages): LspChatMessage[] { return acc.concat([ { role: "tool", - content: message.content.content, - tool_call_id: message.content.tool_call_id, + content: message.content, + tool_call_id: message.tool_call_id, }, ]); } @@ -793,11 +234,15 @@ export function formatMessagesForChat( }); } - if ( - message.role === "context_file" && - typeof message.content === "string" - ) { - const files = parseOrElse<ChatContextFile[]>(message.content, []); + if (message.role === "context_file") { + let files: ChatContextFile[]; + if (typeof message.content === "string") { + files = parseOrElse<ChatContextFile[]>(message.content, []); + } else if (Array.isArray(message.content)) { + files = message.content as ChatContextFile[]; + } else { + files = []; + } const contextFileMessage: ChatContextFileMessage = { role: message.role, content: files, @@ -845,137 +290,3 @@ export function formatMessagesForChat( return acc; }, []); } - -function isValidBuffer(buffer: Uint8Array): boolean { - // Check if the buffer is long enough - if (buffer.length < 8) return false; // "data: " is 6 bytes + 2 bytes for "\n\n" - - // Check the start for "data: " - const startsWithData = - buffer[0] === 100 && // 'd' - buffer[1] === 97 && // 'a' - buffer[2] === 116 && // 't' - buffer[3] === 97 && // 'a' - buffer[4] === 58 && // ':' - buffer[5] === 32; // ' ' - - // Check the end for "\n\n" - const endsWithNewline = - buffer[buffer.length - 2] === 10 && // '\n' - buffer[buffer.length - 1] === 10; // '\n' - - return startsWithData && endsWithNewline; -} - -function bufferStartsWithDetail(buffer: Uint8Array): boolean { - const startsWithDetail = - buffer[0] === 123 && // '{' - buffer[1] === 34 && // '"' - buffer[2] === 100 && // 'd' - buffer[3] === 101 && // 'e' - buffer[4] === 116 && // 't' - buffer[5] === 97 && // 'a' - buffer[6] === 105 && // 'i' - buffer[7] === 108 && // 'l' - buffer[8] === 34 && // '"' - buffer[9] === 58; // ':' - - return startsWithDetail; -} - -export function consumeStream( - reader: ReadableStreamDefaultReader<Uint8Array>, - signal: AbortSignal, - onAbort: () => void, - onChunk: (chunk: Record<string, unknown>) => void, -) { - const decoder = new TextDecoder(); - - function pump({ - done, - value, 
- }: ReadableStreamReadResult<Uint8Array>): Promise<void> { - if (done) return Promise.resolve(); - if (signal.aborted) { - onAbort(); - return Promise.resolve(); - } - - if (bufferStartsWithDetail(value)) { - const str = decoder.decode(value); - const maybeError = checkForDetailMessage(str); - if (maybeError) { - return Promise.reject(maybeError); - } - } - - const combineBufferAndRetry = () => { - return reader.read().then((more) => { - if (more.done) return; // left with an invalid buffer - const buff = new Uint8Array(value.length + more.value.length); - buff.set(value); - buff.set(more.value, value.length); - - return pump({ done, value: buff }); - }); - }; - - if (!isValidBuffer(value)) { - return combineBufferAndRetry(); - } - - const streamAsString = decoder.decode(value); - - const deltas = streamAsString.split("\n\n").filter((str) => str.length > 0); - - if (deltas.length === 0) return Promise.resolve(); - - for (const delta of deltas) { - if (!delta.startsWith("data: ")) { - // eslint-disable-next-line no-console - console.log("Unexpected data in streaming buf: " + delta); - continue; - } - - const maybeJsonString = delta.substring(6); - - if (maybeJsonString === "[DONE]") { - return Promise.resolve(); - } - - if (maybeJsonString === "[ERROR]") { - const errorMessage = "error from lsp"; - const error = new Error(errorMessage); - - return Promise.reject(error); - } - - const maybeErrorData = checkForDetailMessage(maybeJsonString); - if (maybeErrorData) { - const errorMessage: string = - typeof maybeErrorData.detail === "string" - ? maybeErrorData.detail - : JSON.stringify(maybeErrorData.detail); - const error = new Error(errorMessage); - // eslint-disable-next-line no-console - console.error(error); - return Promise.reject(maybeErrorData); - } - - const fallback = {}; - const json = parseOrElse<Record<string, unknown>>( - maybeJsonString, - fallback, - ); - - if (json === fallback) { - return combineBufferAndRetry(); - } - - onChunk(json); - } - return reader.read().then(pump); - } - - return reader.read().then(pump); -} diff --git a/refact-agent/gui/src/features/Chat/currentProject.ts b/refact-agent/gui/src/features/Chat/currentProject.ts index 39c74e05c..e5b42f75e 100644 --- a/refact-agent/gui/src/features/Chat/currentProject.ts +++ b/refact-agent/gui/src/features/Chat/currentProject.ts @@ -24,8 +24,14 @@ export const currentProjectInfoReducer = createReducer( ); export const selectThreadProjectOrCurrentProject = (state: RootState) => { - if (state.chat.thread.integration?.project) { - return state.chat.thread.integration.project; + const threadId = state.chat.current_thread_id; + const runtime = threadId ? state.chat.threads[threadId] : undefined; + if (!runtime) { + return state.current_project.name; } - return state.chat.thread.project_name ?? state.current_project.name; + const thread = runtime.thread; + if (thread.integration?.project) { + return thread.integration.project; + } + return thread.project_name ?? state.current_project.name; }; diff --git a/refact-agent/gui/src/features/Checkpoints/CheckpointButton.tsx b/refact-agent/gui/src/features/Checkpoints/CheckpointButton.tsx index 64416d79f..b897729a2 100644 --- a/refact-agent/gui/src/features/Checkpoints/CheckpointButton.tsx +++ b/refact-agent/gui/src/features/Checkpoints/CheckpointButton.tsx @@ -22,14 +22,15 @@ export const CheckpointButton = ({ return ( <IconButton - size="2" - variant="soft" + size="1" + variant="ghost" title={isPreviewing ? "Reverting..." 
: "Revert agent changes"} onClick={() => void handlePreview(checkpoints, messageIndex)} loading={isPreviewing} disabled={!isOnline || isStreaming || isWaiting} + style={{ width: 20, height: 20 }} > - <ResetIcon /> + <ResetIcon width={12} height={12} /> </IconButton> ); }; diff --git a/refact-agent/gui/src/features/Checkpoints/Checkpoints.stories.tsx b/refact-agent/gui/src/features/Checkpoints/Checkpoints.stories.tsx index 1cbc735c1..0229e9b06 100644 --- a/refact-agent/gui/src/features/Checkpoints/Checkpoints.stories.tsx +++ b/refact-agent/gui/src/features/Checkpoints/Checkpoints.stories.tsx @@ -14,9 +14,6 @@ const Template: React.FC<{ initialState?: CheckpointsMeta }> = ({ initialState, }) => { const store = setUpStore({ - tour: { - type: "finished", - }, config: { apiKey: "foo", addressURL: "Refact", diff --git a/refact-agent/gui/src/features/Checkpoints/Checkpoints.tsx b/refact-agent/gui/src/features/Checkpoints/Checkpoints.tsx index 545a783bf..b8b39e081 100644 --- a/refact-agent/gui/src/features/Checkpoints/Checkpoints.tsx +++ b/refact-agent/gui/src/features/Checkpoints/Checkpoints.tsx @@ -1,4 +1,5 @@ -import { Dialog, Flex, Text, Button } from "@radix-ui/themes"; +import { useState } from "react"; +import { Dialog, Flex, Text, Button, RadioGroup } from "@radix-ui/themes"; import { useCheckpoints, useEventsBusForIDE } from "../../hooks"; import { TruncateLeft } from "../../components/Text"; import { Link } from "../../components/Link"; @@ -10,6 +11,8 @@ import { formatPathName } from "../../utils/formatPathName"; import { CheckpointsStatusIndicator } from "./CheckpointsStatusIndicator"; import { ErrorCallout } from "../../components/Callout"; +export type RestoreMode = "files_only" | "files_and_messages"; + export const Checkpoints = () => { const { openFile } = useEventsBusForIDE(); const { @@ -23,6 +26,9 @@ export const Checkpoints = () => { errorLog, } = useCheckpoints(); + const [restoreMode, setRestoreMode] = + useState<RestoreMode>("files_and_messages"); + const clientTimezone = Intl.DateTimeFormat().resolvedOptions().timeZone; const formattedDate = formatDateOrTimeBasedOnToday( reverted_to, @@ -39,9 +45,8 @@ export const Checkpoints = () => { onOpenChange={(state) => { if (!state) { handleUndo(); - } else { - void handleFix(); } + // Don't auto-call handleFix on open - user must click the button }} > <Dialog.Content className={styles.CheckpointsDialog}> @@ -108,12 +113,33 @@ export const Checkpoints = () => { {errorLog.join("\n")} </ErrorCallout> )} - <Flex - gap="3" - mt={wereFilesChanged ? 
"4" : "2"} - justify="between" - wrap="wrap" - > + + <Flex direction="column" gap="2" mt="4"> + <Text size="2" weight="medium"> + Restore options: + </Text> + <RadioGroup.Root + value={restoreMode} + onValueChange={(value) => setRestoreMode(value as RestoreMode)} + > + <Flex direction="column" gap="2"> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <RadioGroup.Item value="files_and_messages" /> + <span>Restore files and undo messages after this point</span> + </Flex> + </Text> + <Text as="label" size="2"> + <Flex gap="2" align="center"> + <RadioGroup.Item value="files_only" /> + <span>Restore files only (keep messages)</span> + </Flex> + </Text> + </Flex> + </RadioGroup.Root> + </Flex> + + <Flex gap="3" mt="4" justify="between" wrap="wrap"> <Flex gap="3" wrap="wrap" justify="start"> <Button type="button" @@ -126,7 +152,7 @@ export const Checkpoints = () => { <Button loading={isRestoring} disabled={errorLog.length > 0} - onClick={() => void handleFix()} + onClick={() => void handleFix(restoreMode)} title={ isRestoring ? "Rolling back..." diff --git a/refact-agent/gui/src/features/Checkpoints/checkpointsSlice.ts b/refact-agent/gui/src/features/Checkpoints/checkpointsSlice.ts index 7d982cac7..06e007a95 100644 --- a/refact-agent/gui/src/features/Checkpoints/checkpointsSlice.ts +++ b/refact-agent/gui/src/features/Checkpoints/checkpointsSlice.ts @@ -4,6 +4,8 @@ import { Checkpoint, PreviewCheckpointsResponse } from "./types"; export type CheckpointsMeta = { latestCheckpointResult: PreviewCheckpointsResponse & { current_checkpoints: Checkpoint[]; + chat_id: string; + chat_mode?: string; }; isVisible: boolean; isUndoing: boolean; @@ -18,6 +20,8 @@ const initialState: CheckpointsMeta = { current_checkpoints: [], reverted_changes: [], error_log: [], + chat_id: "", + chat_mode: undefined, }, isVisible: false, isUndoing: false, @@ -35,6 +39,8 @@ export const checkpointsSlice = createSlice({ PreviewCheckpointsResponse & { messageIndex: number; current_checkpoints: Checkpoint[]; + chat_id: string; + chat_mode?: string; } >, ) => { diff --git a/refact-agent/gui/src/features/Checkpoints/types.ts b/refact-agent/gui/src/features/Checkpoints/types.ts index e79a46781..0c76d5a94 100644 --- a/refact-agent/gui/src/features/Checkpoints/types.ts +++ b/refact-agent/gui/src/features/Checkpoints/types.ts @@ -18,10 +18,14 @@ export type RevertedCheckpointData = { export type PreviewCheckpointsPayload = { checkpoints: Checkpoint[]; + chat_id: string; + chat_mode?: string; }; export type RestoreCheckpointsPayload = { checkpoints: Checkpoint[]; + chat_id: string; + chat_mode?: string; }; export type PreviewCheckpointsResponse = { @@ -65,6 +69,10 @@ export function isPreviewCheckpointsResponse( if (!Array.isArray(json.reverted_changes)) return false; if (!json.reverted_changes.every(isRevertedCheckpointData)) return false; + // Check error_log array + if (!("error_log" in json) || !Array.isArray(json.error_log)) return false; + if (!json.error_log.every((item) => typeof item === "string")) return false; + return true; } diff --git a/refact-agent/gui/src/features/CoinBalance/coinBalanceSlice.ts b/refact-agent/gui/src/features/CoinBalance/coinBalanceSlice.ts index fc3004a8f..a732182e5 100644 --- a/refact-agent/gui/src/features/CoinBalance/coinBalanceSlice.ts +++ b/refact-agent/gui/src/features/CoinBalance/coinBalanceSlice.ts @@ -1,7 +1,6 @@ import { createSlice } from "@reduxjs/toolkit"; import { smallCloudApi } from "../../services/smallcloud"; -import { chatResponse } from "../Chat"; -import { 
isChatResponseChoice } from "../../events";
+import { applyChatEvent } from "../Chat/Thread/actions";
 type CoinBalance = {
   balance: number;
 };
 const initialState: CoinBalance = {
   balance: 0,
 };
+
+function extractMeteringBalance(event: unknown): number | null {
+  if (typeof event !== "object" || event === null) return null;
+
+  const e = event as Record<string, unknown>;
+
+  if ("metering_balance" in e && typeof e.metering_balance === "number") {
+    return e.metering_balance;
+  }
+
+  if (e.type === "stream_delta" && Array.isArray(e.ops)) {
+    for (const op of e.ops) {
+      if (
+        typeof op === "object" &&
+        op !== null &&
+        (op as Record<string, unknown>).op === "merge_extra"
+      ) {
+        const extra = (op as Record<string, unknown>).extra;
+        if (
+          typeof extra === "object" &&
+          extra !== null &&
+          "metering_balance" in extra &&
+          typeof (extra as Record<string, unknown>).metering_balance ===
+            "number"
+        ) {
+          return (extra as Record<string, unknown>).metering_balance as number;
+        }
+      }
+    }
+  }
+
+  if (
+    e.type === "stream_finished" &&
+    typeof e.usage === "object" &&
+    e.usage !== null
+  ) {
+    const usage = e.usage as Record<string, unknown>;
+    if (
+      "metering_balance" in usage &&
+      typeof usage.metering_balance === "number"
+    ) {
+      return usage.metering_balance;
+    }
+  }
+
+  return null;
+}
+
 export const coinBallanceSlice = createSlice({
   name: "coins",
   initialState,
   reducers: {},
   extraReducers: (builder) => {
+    builder.addCase(applyChatEvent, (state, action) => {
+      const balance = extractMeteringBalance(action.payload);
+      if (balance !== null) {
+        state.balance = balance;
+      }
+    });
+
     builder.addMatcher(
       smallCloudApi.endpoints.getUser.matchFulfilled,
       (state, action) => {
         state.balance = action.payload.metering_balance;
       },
-    ),
-    builder.addMatcher(chatResponse.match, (state, action) => {
-      if (!isChatResponseChoice(action.payload)) return state;
-      if (
-        "metering_balance" in action.payload &&
-        typeof action.payload.metering_balance === "number"
-      ) {
-        state.balance = action.payload.metering_balance;
-      }
-    });
+    );
   },
   selectors: {
diff --git a/refact-agent/gui/src/features/Connection/connectionSlice.ts b/refact-agent/gui/src/features/Connection/connectionSlice.ts
new file mode 100644
index 000000000..8162d8bdc
--- /dev/null
+++ b/refact-agent/gui/src/features/Connection/connectionSlice.ts
@@ -0,0 +1,198 @@
+import { createSlice, PayloadAction } from "@reduxjs/toolkit";
+import { RootState } from "../../app/store";
+
+export type BackendStatus = "unknown" | "online" | "offline";
+export type SseStatus = "disconnected" | "connecting" | "connected";
+
+export type SseConnectionInfo = {
+  status: SseStatus;
+  lastEventAt: number | null;
+  retryCount: number;
+  error: string | null;
+};
+
+export type ConnectionState = {
+  browserOnline: boolean;
+  backendStatus: BackendStatus;
+  backendLastOkAt: number | null;
+  backendError: string | null;
+  sseConnections: Partial<Record<string, SseConnectionInfo>>;
+};
+
+const initialState: ConnectionState = {
+  browserOnline: typeof navigator !== "undefined" ? navigator.onLine : true,
+  backendStatus: "unknown",
+  backendLastOkAt: null,
+  backendError: null,
+  sseConnections: {},
+};
+
+export const connectionSlice = createSlice({
+  name: "connection",
+  initialState,
+  reducers: {
+    setBrowserOnline: (state, action: PayloadAction<boolean>) => {
+      state.browserOnline = action.payload;
+    },
+
+    setBackendStatus: (
+      state,
+      action: PayloadAction<{
+        status: BackendStatus;
+        error?: string | null;
+      }>,
+    ) => {
+      state.backendStatus = action.payload.status;
+      if (action.payload.status === "online") {
+        state.backendLastOkAt = Date.now();
+        state.backendError = null;
+      } else if (action.payload.error) {
+        state.backendError = action.payload.error;
+      }
+    },
+
+    setSseStatus: (
+      state,
+      action: PayloadAction<{
+        chatId: string;
+        status: SseStatus;
+        error?: string | null;
+      }>,
+    ) => {
+      const { chatId, status, error } = action.payload;
+      const existing = state.sseConnections[chatId];
+
+      if (!existing) {
+        state.sseConnections[chatId] = {
+          status,
+          lastEventAt: status === "connected" ? Date.now() : null,
+          retryCount: status === "disconnected" ? 1 : 0,
+          error: error ?? null,
+        };
+      } else {
+        existing.status = status;
+        if (status === "connected") {
+          existing.lastEventAt = Date.now();
+          existing.retryCount = 0;
+          existing.error = null;
+        } else if (status === "disconnected") {
+          existing.retryCount += 1;
+          if (error) {
+            existing.error = error;
+          }
+        }
+      }
+    },
+
+    sseEventReceived: (state, action: PayloadAction<{ chatId: string }>) => {
+      const conn = state.sseConnections[action.payload.chatId];
+      if (conn) {
+        conn.lastEventAt = Date.now();
+      }
+    },
+
+    resetSseRetryCount: (state, action: PayloadAction<{ chatId: string }>) => {
+      const conn = state.sseConnections[action.payload.chatId];
+      if (conn) {
+        conn.retryCount = 0;
+      }
+    },
+
+    removeSseConnection: (state, action: PayloadAction<{ chatId: string }>) => {
+      // eslint-disable-next-line @typescript-eslint/no-dynamic-delete
+      delete state.sseConnections[action.payload.chatId];
+    },
+
+    clearAllSseConnections: (state) => {
+      state.sseConnections = {};
+    },
+  },
+});
+
+export const {
+  setBrowserOnline,
+  setBackendStatus,
+  setSseStatus,
+  sseEventReceived,
+  resetSseRetryCount,
+  removeSseConnection,
+  clearAllSseConnections,
+} = connectionSlice.actions;
+
+export const selectBrowserOnline = (state: RootState) =>
+  state.connection.browserOnline;
+
+export const selectBackendStatus = (state: RootState) =>
+  state.connection.backendStatus;
+
+export const selectBackendLastOkAt = (state: RootState) =>
+  state.connection.backendLastOkAt;
+
+export const selectSseConnections = (state: RootState) =>
+  state.connection.sseConnections;
+
+export const selectSseConnectionForChat = (state: RootState, chatId: string) =>
+  state.connection.sseConnections[chatId];
+
+export const selectSseStatusForChat = (state: RootState, chatId: string) =>
+  state.connection.sseConnections[chatId]?.status ?? null;
+
+export const selectCurrentChatSseStatus = (
+  state: RootState,
+): SseStatus | null => {
+  const currentId = state.chat.current_thread_id;
+  if (!currentId) return null;
+  const conn = state.connection.sseConnections[currentId];
+  return conn?.status ??
"disconnected"; +}; + +export const selectGlobalSseStatus = (state: RootState): SseStatus => { + const connections = Object.values(state.connection.sseConnections).filter( + (c): c is SseConnectionInfo => c !== undefined, + ); + if (connections.length === 0) return "disconnected"; + if (connections.some((c) => c.status === "connecting")) return "connecting"; + if (connections.every((c) => c.status === "connected")) return "connected"; + return "disconnected"; +}; + +export const selectIsFullyConnected = (state: RootState): boolean => { + if (!state.connection.browserOnline) return false; + if (state.connection.backendStatus !== "online") return false; + const sseStatus = selectCurrentChatSseStatus(state); + if (sseStatus === null) return true; + return sseStatus === "connected"; +}; + +export const selectConnectionProblem = (state: RootState): string | null => { + if (!state.connection.browserOnline) { + return "Browser is offline"; + } + if (state.connection.backendStatus === "offline") { + return "Backend server unreachable"; + } + if (state.connection.backendStatus === "unknown") { + return "Connecting to backend..."; + } + const currentSseStatus = selectCurrentChatSseStatus(state); + if (currentSseStatus === null) { + return null; + } + if (currentSseStatus === "disconnected") { + return "Real-time connection lost"; + } + if (currentSseStatus === "connecting") { + return "Connecting..."; + } + return null; +}; + +export const selectMaxRetryCount = (state: RootState): number => { + const connections = Object.values(state.connection.sseConnections).filter( + (c): c is SseConnectionInfo => c !== undefined, + ); + if (connections.length === 0) return 0; + return Math.max(...connections.map((c) => c.retryCount)); +}; + +export default connectionSlice.reducer; diff --git a/refact-agent/gui/src/features/Connection/index.ts b/refact-agent/gui/src/features/Connection/index.ts new file mode 100644 index 000000000..3bb2a12fc --- /dev/null +++ b/refact-agent/gui/src/features/Connection/index.ts @@ -0,0 +1,28 @@ +export { + connectionSlice, + setBrowserOnline, + setBackendStatus, + setSseStatus, + sseEventReceived, + resetSseRetryCount, + removeSseConnection, + clearAllSseConnections, + selectBrowserOnline, + selectBackendStatus, + selectBackendLastOkAt, + selectSseConnections, + selectSseConnectionForChat, + selectSseStatusForChat, + selectCurrentChatSseStatus, + selectGlobalSseStatus, + selectIsFullyConnected, + selectConnectionProblem, + selectMaxRetryCount, +} from "./connectionSlice"; + +export type { + BackendStatus, + SseStatus, + SseConnectionInfo, + ConnectionState, +} from "./connectionSlice"; diff --git a/refact-agent/gui/src/features/Customization/Customization.module.css b/refact-agent/gui/src/features/Customization/Customization.module.css new file mode 100644 index 000000000..c9495d22c --- /dev/null +++ b/refact-agent/gui/src/features/Customization/Customization.module.css @@ -0,0 +1,109 @@ +.configList { + padding: var(--space-1); +} + +.configItem { + cursor: pointer; + transition: background-color 0.15s; + padding: var(--space-2); +} + +.configItem:hover { + background-color: var(--gray-a3); +} + +.configItem.selected { + background-color: var(--accent-a4); + border-color: var(--accent-8); +} + +.configEditor { + padding: var(--space-2); + flex: 1; + display: flex; + flex-direction: column; + min-height: 0; + overflow: hidden; +} + +.formContainer { + flex: 1; + display: flex; + flex-direction: column; + min-height: 0; + overflow: hidden; +} + +.yamlEditor { + flex: 1; + 
min-height: 300px; + font-family: var(--code-font-family); + font-size: var(--font-size-1); + padding: var(--space-2); + border: 1px solid var(--gray-6); + border-radius: var(--radius-2); + background-color: var(--gray-2); + color: var(--gray-12); + resize: vertical; +} + +.yamlEditor:focus { + outline: none; + border-color: var(--accent-8); +} + +.editorHeader { + flex-wrap: wrap; + gap: var(--space-2); +} + +.scopeRow { + flex-wrap: wrap; + gap: var(--space-1); +} + +.panelContainer { + display: flex; + flex-direction: column; + height: calc(100vh - 120px); + min-height: 300px; +} + +.listPanel { + composes: scrollbarThin from "../../components/shared/scrollbar.module.css"; + flex: 1; + overflow-y: auto; + min-height: 0; +} + +.editorPanel { + flex: 1; + display: flex; + flex-direction: column; + min-height: 0; + overflow: hidden; +} + +.backButton { + margin-bottom: var(--space-2); + flex-shrink: 0; +} + +.compactConfigItem { + cursor: pointer; + padding: var(--space-1) var(--space-2); + border-radius: var(--radius-2); + transition: background-color 0.15s; + display: flex; + align-items: center; + justify-content: space-between; + gap: var(--space-2); +} + +.compactConfigItem:hover { + background-color: var(--gray-a3); +} + +.compactConfigItem.selected { + background-color: var(--accent-a4); +} diff --git a/refact-agent/gui/src/features/Customization/Customization.tsx b/refact-agent/gui/src/features/Customization/Customization.tsx new file mode 100644 index 000000000..31f6881b4 --- /dev/null +++ b/refact-agent/gui/src/features/Customization/Customization.tsx @@ -0,0 +1,723 @@ +import React, { useState, useCallback, useRef, useEffect } from "react"; +import { + Flex, + Button, + Tabs, + Text, + Badge, + IconButton, + Dialog, + TextField, + SegmentedControl, + Card, +} from "@radix-ui/themes"; +import { + ArrowLeftIcon, + PlusIcon, + TrashIcon, + GlobeIcon, + FileIcon, + CodeIcon, + MixerHorizontalIcon, +} from "@radix-ui/react-icons"; + +import { ScrollArea } from "../../components/ScrollArea"; +import { PageWrapper } from "../../components/PageWrapper"; +import { Spinner } from "../../components/Spinner"; +import { + useGetRegistryQuery, + useGetConfigQuery, + useSaveConfigMutation, + useCreateConfigMutation, + useDeleteConfigMutation, + ConfigItem, + ConfigKind, +} from "../../services/refact/customization"; +import type { Config } from "../Config/configSlice"; +import { + CodeLensForm, + ToolboxCommandForm, + ModeForm, + SubagentForm, +} from "./components"; +import { + applyPatch, + isPlainObject, + sanitizeObject, + ConfigPatch, + validateConfigId, +} from "./components/configUtils"; + +import styles from "./Customization.module.css"; + +export type CustomizationProps = { + backFromCustomization: () => void; + host: Config["host"]; + tabbed: Config["tabbed"]; + initialKind?: ConfigKind; + initialConfigId?: string; +}; + +const KIND_LABELS: Record<ConfigKind, string> = { + modes: "Modes", + subagents: "Subagents", + toolbox_commands: "Toolbox", + code_lens: "Code Lens", +}; + +const ConfigList: React.FC<{ + items: ConfigItem[]; + selectedId: string | null; + onSelect: (id: string) => void; + onDelete: (id: string, scope: "global" | "local") => void; + onCreate: () => void; +}> = ({ items, selectedId, onSelect, onDelete, onCreate }) => { + return ( + <Flex direction="column" gap="1" className={styles.configList}> + <Button variant="soft" onClick={onCreate} size="1"> + <PlusIcon /> New + </Button> + {items.map((item) => ( + <div + key={item.id} + role="button" + tabIndex={0} + 
className={`${styles.compactConfigItem} ${ + selectedId === item.id ? styles.selected : "" + }`} + onClick={() => onSelect(item.id)} + onKeyDown={(e) => { + if (e.key === "Enter" || e.key === " ") { + e.preventDefault(); + onSelect(item.id); + } + }} + > + <Flex direction="column" gap="0" style={{ minWidth: 0, flex: 1 }}> + <Text + size="1" + weight="medium" + style={{ + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + }} + > + {item.title} + </Text> + <Flex align="center" gap="1"> + <Text + size="1" + color="gray" + style={{ + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + }} + > + {item.id} + </Text> + <Badge + size="1" + color={item.scope === "global" ? "blue" : "green"} + variant="soft" + > + {item.scope === "global" ? "G" : "L"} + </Badge> + </Flex> + </Flex> + <IconButton + size="1" + variant="ghost" + color="red" + onClick={(e) => { + e.stopPropagation(); + onDelete(item.id, item.scope); + }} + > + <TrashIcon /> + </IconButton> + </div> + ))} + {items.length === 0 && ( + <Text size="1" color="gray"> + No configs found + </Text> + )} + </Flex> + ); +}; + +type EditorView = "form" | "yaml"; + +const jsYamlPromise = import("js-yaml"); + +const ConfigEditor: React.FC<{ + kind: ConfigKind; + configId: string; + configItem: ConfigItem; + onSaved: () => void; +}> = ({ kind, configId, configItem, onSaved }) => { + const { data, isLoading, error } = useGetConfigQuery({ kind, id: configId }); + const [saveConfig, { isLoading: isSaving }] = useSaveConfigMutation(); + const [configJson, setConfigJson] = useState<Record<string, unknown> | null>( + null, + ); + const [yaml, setYaml] = useState<string>(""); + const [saveError, setSaveError] = useState<string | null>(null); + const [targetScope, setTargetScope] = useState<"global" | "local">( + configItem.scope, + ); + const [view, setView] = useState<EditorView>("form"); + const [yamlParseError, setYamlParseError] = useState<string | null>(null); + const yamlSyncTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null); + const syncVersionRef = useRef(0); + + useEffect(() => { + if (data) { + if (yamlSyncTimeoutRef.current) { + clearTimeout(yamlSyncTimeoutRef.current); + yamlSyncTimeoutRef.current = null; + } + syncVersionRef.current++; + setConfigJson(data.config); + setYaml(data.raw_yaml); + setYamlParseError(null); + } + }, [data]); + + useEffect(() => { + const versionRef = syncVersionRef; + return () => { + if (yamlSyncTimeoutRef.current) { + clearTimeout(yamlSyncTimeoutRef.current); + } + versionRef.current++; + }; + }, []); + + useEffect(() => { + setTargetScope(configItem.scope); + }, [configItem.scope]); + + const syncYamlToJson = useCallback( + async (yamlStr: string, version: number) => { + try { + const jsYaml = await jsYamlPromise; + if (version !== syncVersionRef.current) return; + const parsed = jsYaml.load(yamlStr); + if (!isPlainObject(parsed)) { + setYamlParseError("Config must be an object"); + return; + } + const sanitized = sanitizeObject(parsed) as Record<string, unknown>; + setConfigJson(sanitized); + setYamlParseError(null); + } catch (e) { + if (version !== syncVersionRef.current) return; + setYamlParseError(e instanceof Error ? 
e.message : String(e)); + } + }, + [], + ); + + const syncJsonToYaml = useCallback( + async (json: Record<string, unknown>, version: number) => { + try { + const jsYaml = await jsYamlPromise; + if (version !== syncVersionRef.current) return; + const yamlStr = jsYaml.dump(json, { + indent: 2, + lineWidth: -1, + noRefs: true, + }); + setYaml(yamlStr); + setYamlParseError(null); + } catch (e) { + if (version !== syncVersionRef.current) return; + setYamlParseError(e instanceof Error ? e.message : String(e)); + } + }, + [], + ); + + const handleYamlChange = useCallback( + (yamlStr: string) => { + setYaml(yamlStr); + if (yamlSyncTimeoutRef.current) clearTimeout(yamlSyncTimeoutRef.current); + yamlSyncTimeoutRef.current = setTimeout(() => { + const version = ++syncVersionRef.current; + void syncYamlToJson(yamlStr, version); + }, 300); + }, + [syncYamlToJson], + ); + + const handleFormPatch = useCallback( + (patch: ConfigPatch) => { + setConfigJson((prev) => { + if (!prev) return prev; + const updated = applyPatch(prev, patch); + if (yamlSyncTimeoutRef.current) + clearTimeout(yamlSyncTimeoutRef.current); + yamlSyncTimeoutRef.current = setTimeout(() => { + const version = ++syncVersionRef.current; + void syncJsonToYaml(updated, version); + }, 300); + return updated; + }); + }, + [syncJsonToYaml], + ); + + const handleSave = useCallback(async () => { + setSaveError(null); + if (!configJson) { + setSaveError("No config to save"); + return; + } + try { + const result = await saveConfig({ + kind, + id: configId, + config: configJson, + scope: targetScope, + }).unwrap(); + if (!result.ok && result.errors.length > 0) { + setSaveError(result.errors.map((e) => e.error).join(", ")); + } else { + onSaved(); + } + } catch (e) { + setSaveError(e instanceof Error ? e.message : String(e)); + } + }, [configJson, kind, configId, saveConfig, onSaved, targetScope]); + + if (isLoading) return <Spinner spinning />; + if (error) return <Text color="red">Error loading config</Text>; + if (!configJson) return <Text color="gray">Loading...</Text>; + + const canSaveToLocal = configItem.local_path !== ""; + const scopeChanged = targetScope !== configItem.scope; + + return ( + <Flex direction="column" gap="2" className={styles.configEditor}> + <Flex + justify="between" + align="center" + wrap="wrap" + gap="2" + className={styles.editorHeader} + > + <Text size="2" weight="bold"> + {configId} + </Text> + <Flex gap="1" align="center"> + <SegmentedControl.Root + size="1" + value={view} + onValueChange={(v) => setView(v as EditorView)} + > + <SegmentedControl.Item value="form"> + <MixerHorizontalIcon width={12} height={12} /> + </SegmentedControl.Item> + <SegmentedControl.Item value="yaml"> + <CodeIcon width={12} height={12} /> + </SegmentedControl.Item> + </SegmentedControl.Root> + <Button + size="1" + onClick={() => void handleSave()} + disabled={isSaving || !!yamlParseError} + > + {isSaving ? "..." : "Save"} + </Button> + </Flex> + </Flex> + {saveError && ( + <Text size="1" color="red"> + {saveError} + </Text> + )} + {yamlParseError && ( + <Text size="1" color="red"> + YAML: {yamlParseError} + </Text> + )} + <Flex align="center" gap="2" wrap="wrap" className={styles.scopeRow}> + {canSaveToLocal ? 
( + <SegmentedControl.Root + size="1" + value={targetScope} + onValueChange={(v) => setTargetScope(v as "global" | "local")} + > + <SegmentedControl.Item value="global"> + <GlobeIcon width={10} height={10} /> + </SegmentedControl.Item> + <SegmentedControl.Item value="local"> + <FileIcon width={10} height={10} /> + </SegmentedControl.Item> + </SegmentedControl.Root> + ) : ( + <Badge size="1" color="blue" variant="soft"> + <GlobeIcon width={10} height={10} /> + </Badge> + )} + {scopeChanged && ( + <Badge size="1" color="orange"> + → {targetScope} + </Badge> + )} + </Flex> + {view === "form" ? ( + <div className={styles.formContainer}> + <FormEditor + kind={kind} + config={configJson} + onPatch={handleFormPatch} + /> + </div> + ) : ( + <textarea + className={styles.yamlEditor} + value={yaml} + onChange={(e) => handleYamlChange(e.target.value)} + spellCheck={false} + /> + )} + </Flex> + ); +}; + +const FormEditor: React.FC<{ + kind: ConfigKind; + config: Record<string, unknown>; + onPatch: (patch: ConfigPatch) => void; +}> = ({ kind, config, onPatch }) => { + switch (kind) { + case "code_lens": + return <CodeLensForm config={config} onPatch={onPatch} />; + case "toolbox_commands": + return <ToolboxCommandForm config={config} onPatch={onPatch} />; + case "modes": + return <ModeForm config={config} onPatch={onPatch} />; + case "subagents": + return <SubagentForm config={config} onPatch={onPatch} />; + } +}; + +const CreateConfigDialog: React.FC<{ + kind: ConfigKind; + open: boolean; + onOpenChange: (open: boolean) => void; + onCreated: (id: string) => void; + hasProjectRoot: boolean; +}> = ({ kind, open, onOpenChange, onCreated, hasProjectRoot }) => { + const [id, setId] = useState(""); + const [scope, setScope] = useState<"global" | "local">( + hasProjectRoot ? "local" : "global", + ); + const [createConfig, { isLoading }] = useCreateConfigMutation(); + const [error, setError] = useState<string | null>(null); + + React.useEffect(() => { + setScope(hasProjectRoot ? "local" : "global"); + }, [hasProjectRoot]); + + const handleCreate = useCallback(async () => { + setError(null); + const validationError = validateConfigId(id); + if (validationError) { + setError(validationError); + return; + } + const defaultConfig = getDefaultConfig(kind, id); + try { + const result = await createConfig({ + kind, + id, + config: defaultConfig, + scope, + }).unwrap(); + if (!result.ok && result.errors.length > 0) { + setError(result.errors.map((e) => e.error).join(", ")); + } else { + setId(""); + onOpenChange(false); + onCreated(id); + } + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + } + }, [kind, id, scope, createConfig, onOpenChange, onCreated]); + + return ( + <Dialog.Root open={open} onOpenChange={onOpenChange}> + <Dialog.Content style={{ maxWidth: 400 }}> + <Dialog.Title>Create {KIND_LABELS[kind]}</Dialog.Title> + <Flex direction="column" gap="3"> + <TextField.Root + placeholder="Config ID (e.g., my_mode)" + value={id} + onChange={(e) => setId(e.target.value)} + /> + <Flex direction="column" gap="1"> + <Text size="1">Save to:</Text> + {hasProjectRoot ? 
( + <SegmentedControl.Root + size="1" + value={scope} + onValueChange={(v) => setScope(v as "global" | "local")} + > + <SegmentedControl.Item value="global"> + <Flex align="center" gap="1"> + <GlobeIcon width={12} height={12} /> + Global (~/.config/refact/) + </Flex> + </SegmentedControl.Item> + <SegmentedControl.Item value="local"> + <Flex align="center" gap="1"> + <FileIcon width={12} height={12} /> + Project (.refact/) + </Flex> + </SegmentedControl.Item> + </SegmentedControl.Root> + ) : ( + <Badge size="1" color="blue" variant="soft"> + <Flex align="center" gap="1"> + <GlobeIcon width={10} height={10} /> + Global only (no project open) + </Flex> + </Badge> + )} + </Flex> + {error && ( + <Text size="2" color="red"> + {error} + </Text> + )} + </Flex> + <Flex gap="3" mt="4" justify="end"> + <Dialog.Close> + <Button variant="soft" color="gray"> + Cancel + </Button> + </Dialog.Close> + <Button onClick={() => void handleCreate()} disabled={isLoading}> + {isLoading ? "Creating..." : "Create"} + </Button> + </Flex> + </Dialog.Content> + </Dialog.Root> + ); +}; + +function getDefaultConfig( + kind: ConfigKind, + id: string, +): Record<string, unknown> { + switch (kind) { + case "modes": + return { + schema_version: 1, + id, + title: id, + description: "", + specific: false, + prompt: "", + tools: [], + }; + case "subagents": + return { + schema_version: 1, + id, + title: id, + description: "", + specific: false, + expose_as_tool: true, + has_code: false, + subchat: { context_mode: "bare" }, + messages: {}, + }; + case "toolbox_commands": + return { + schema_version: 1, + id, + description: "", + messages: [], + }; + case "code_lens": + return { + schema_version: 1, + id, + label: id, + auto_submit: false, + new_tab: false, + messages: [], + }; + } +} + +export const Customization: React.FC<CustomizationProps> = ({ + backFromCustomization, + host, + tabbed, + initialKind = "modes", + initialConfigId, +}) => { + const [activeKind, setActiveKind] = useState<ConfigKind>(initialKind); + const [selectedConfigId, setSelectedConfigId] = useState<string | null>( + initialConfigId ?? null, + ); + const [createDialogOpen, setCreateDialogOpen] = useState(false); + + const { data: registry, isLoading, refetch } = useGetRegistryQuery(undefined); + const [deleteConfig] = useDeleteConfigMutation(); + + const getItemsForKind = (kind: ConfigKind): ConfigItem[] => { + if (!registry) return []; + switch (kind) { + case "modes": + return registry.modes; + case "subagents": + return registry.subagents; + case "toolbox_commands": + return registry.toolbox_commands; + case "code_lens": + return registry.code_lens; + } + }; + + const getAllItems = (): ConfigItem[] => { + if (!registry) return []; + return [ + ...registry.modes, + ...registry.subagents, + ...registry.toolbox_commands, + ...registry.code_lens, + ]; + }; + + const handleDelete = useCallback( + async (id: string, scope: "global" | "local") => { + if (!confirm(`Delete ${id} from ${scope}?`)) return; + await deleteConfig({ kind: activeKind, id, scope }); + if (selectedConfigId === id) { + setSelectedConfigId(null); + } + await refetch(); + }, + [activeKind, selectedConfigId, deleteConfig, refetch], + ); + + const handleTabChange = useCallback((value: string) => { + setActiveKind(value as ConfigKind); + setSelectedConfigId(null); + }, []); + + if (isLoading) return <Spinner spinning />; + + return ( + <PageWrapper host={host} noPadding> + {host === "vscode" && !tabbed ? 
( + <Flex gap="2" pb="2"> + <Button variant="surface" onClick={backFromCustomization}> + <ArrowLeftIcon width="16" height="16" /> + Back + </Button> + </Flex> + ) : ( + <Button + mr="auto" + variant="outline" + onClick={backFromCustomization} + mb="2" + > + Back + </Button> + )} + + {registry?.errors && registry.errors.length > 0 && ( + <Card mb="3" style={{ backgroundColor: "var(--red-3)" }}> + <Text size="2" color="red"> + {registry.errors.length} config error(s):{" "} + {registry.errors.map((e) => e.error).join(", ")} + </Text> + </Card> + )} + + <Tabs.Root value={activeKind} onValueChange={handleTabChange}> + <Tabs.List size="1"> + {(Object.keys(KIND_LABELS) as ConfigKind[]).map((kind) => ( + <Tabs.Trigger key={kind} value={kind}> + {KIND_LABELS[kind]} ({getItemsForKind(kind).length}) + </Tabs.Trigger> + ))} + </Tabs.List> + + <div className={styles.panelContainer}> + {(() => { + if (!selectedConfigId) { + return ( + <ScrollArea scrollbars="vertical" className={styles.listPanel}> + <ConfigList + items={getItemsForKind(activeKind)} + selectedId={selectedConfigId} + onSelect={setSelectedConfigId} + onDelete={(id, scope) => void handleDelete(id, scope)} + onCreate={() => setCreateDialogOpen(true)} + /> + </ScrollArea> + ); + } + const selectedItem = getItemsForKind(activeKind).find( + (i) => i.id === selectedConfigId, + ); + if (!selectedItem) { + return ( + <ScrollArea scrollbars="vertical" className={styles.listPanel}> + <ConfigList + items={getItemsForKind(activeKind)} + selectedId={selectedConfigId} + onSelect={setSelectedConfigId} + onDelete={(id, scope) => void handleDelete(id, scope)} + onCreate={() => setCreateDialogOpen(true)} + /> + </ScrollArea> + ); + } + return ( + <div className={styles.editorPanel}> + <Button + variant="ghost" + size="1" + onClick={() => setSelectedConfigId(null)} + className={styles.backButton} + > + <ArrowLeftIcon /> Back to list + </Button> + <ConfigEditor + kind={activeKind} + configId={selectedConfigId} + configItem={selectedItem} + onSaved={() => void refetch()} + /> + </div> + ); + })()} + </div> + </Tabs.Root> + + <CreateConfigDialog + kind={activeKind} + open={createDialogOpen} + onOpenChange={setCreateDialogOpen} + onCreated={(id) => setSelectedConfigId(id)} + hasProjectRoot={ + registry?.has_project_root ?? 
+ getAllItems().some((i) => i.local_path !== "") + } + /> + </PageWrapper> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/CodeLensForm.tsx b/refact-agent/gui/src/features/Customization/components/CodeLensForm.tsx new file mode 100644 index 000000000..2104da696 --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/CodeLensForm.tsx @@ -0,0 +1,83 @@ +import React, { useCallback } from "react"; +import { Flex, TextField, Text, Switch } from "@radix-ui/themes"; +import { MessageListEditor } from "./MessageListEditor"; +import { + ConfigPatch, + safeString, + safeBoolean, + safeMessageArray, +} from "./configUtils"; + +type CodeLensFormProps = { + config: Record<string, unknown>; + onPatch: (patch: ConfigPatch) => void; +}; + +export const CodeLensForm: React.FC<CodeLensFormProps> = ({ + config, + onPatch, +}) => { + const label = safeString(config.label); + const autoSubmit = safeBoolean(config.auto_submit); + const newTab = safeBoolean(config.new_tab); + const messages = safeMessageArray(config.messages); + + const patch = useCallback( + (path: (string | number)[], value: unknown) => { + onPatch({ path, value }); + }, + [onPatch], + ); + + return ( + <Flex direction="column" gap="4"> + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + Label + </Text> + <TextField.Root + value={label} + onChange={(e) => patch(["label"], e.target.value)} + placeholder="Display label in editor" + /> + <Text size="1" color="gray"> + Text shown in the code lens above functions/classes + </Text> + </Flex> + + <Flex gap="4"> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Flex align="center" gap="2"> + <Switch + checked={autoSubmit} + onCheckedChange={(checked) => patch(["auto_submit"], checked)} + /> + <Text size="2">Auto Submit</Text> + </Flex> + <Text size="1" color="gray"> + Automatically send message when clicked + </Text> + </Flex> + + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Flex align="center" gap="2"> + <Switch + checked={newTab} + onCheckedChange={(checked) => patch(["new_tab"], checked)} + /> + <Text size="2">New Tab</Text> + </Flex> + <Text size="1" color="gray"> + Open in a new chat tab + </Text> + </Flex> + </Flex> + + <MessageListEditor + value={messages} + onChange={(msgs) => patch(["messages"], msgs)} + label="Messages" + /> + </Flex> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/MessageListEditor.tsx b/refact-agent/gui/src/features/Customization/components/MessageListEditor.tsx new file mode 100644 index 000000000..ac79180e1 --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/MessageListEditor.tsx @@ -0,0 +1,189 @@ +import React, { useCallback, useState, useEffect } from "react"; +import { + Flex, + Button, + TextField, + IconButton, + TextArea, + Text, + DropdownMenu, +} from "@radix-ui/themes"; +import { + PlusIcon, + TrashIcon, + ChevronUpIcon, + ChevronDownIcon, + ChevronDownIcon as DropdownIcon, +} from "@radix-ui/react-icons"; +import styles from "./editors.module.css"; + +export type MessageTemplate = { + role: string; + content: string; +}; + +type InternalMessage = MessageTemplate & { _id: string }; + +type MessageListEditorProps = { + value: MessageTemplate[]; + onChange: (value: MessageTemplate[]) => void; + label?: string; +}; + +const COMMON_ROLES = ["system", "user", "assistant", "tool", "developer"]; + +let idCounter = 0; +const generateId = () => `msg_${++idCounter}_${Date.now()}`; + +const toInternal = (msgs: MessageTemplate[]): 
InternalMessage[] => + msgs.map((m) => ({ ...m, _id: generateId() })); + +const toExternal = (msgs: InternalMessage[]): MessageTemplate[] => + msgs.map(({ _id, ...rest }) => rest); + +export const MessageListEditor: React.FC<MessageListEditorProps> = ({ + value, + onChange, + label = "Messages", +}) => { + const [internal, setInternal] = useState<InternalMessage[]>(() => + toInternal(value), + ); + const valueKey = JSON.stringify(value); + + useEffect(() => { + setInternal(toInternal(value)); + // eslint-disable-next-line react-hooks/exhaustive-deps -- valueKey is derived from value, used for deep comparison + }, [valueKey]); + + const emit = useCallback( + (msgs: InternalMessage[]) => { + setInternal(msgs); + onChange(toExternal(msgs)); + }, + [onChange], + ); + + const addMessage = useCallback(() => { + emit([...internal, { role: "user", content: "", _id: generateId() }]); + }, [internal, emit]); + + const removeMessage = useCallback( + (id: string) => { + emit(internal.filter((m) => m._id !== id)); + }, + [internal, emit], + ); + + const updateMessage = useCallback( + (id: string, field: keyof MessageTemplate, fieldValue: string) => { + emit( + internal.map((m) => (m._id === id ? { ...m, [field]: fieldValue } : m)), + ); + }, + [internal, emit], + ); + + const moveMessage = useCallback( + (id: string, direction: -1 | 1) => { + const idx = internal.findIndex((m) => m._id === id); + const newIdx = idx + direction; + if (newIdx < 0 || newIdx >= internal.length) return; + const newInternal = [...internal]; + [newInternal[idx], newInternal[newIdx]] = [ + newInternal[newIdx], + newInternal[idx], + ]; + emit(newInternal); + }, + [internal, emit], + ); + + return ( + <Flex direction="column" gap="2"> + <Flex justify="between" align="center"> + <Text size="2" weight="medium"> + {label} + </Text> + <Button size="1" variant="soft" onClick={addMessage}> + <PlusIcon /> Add + </Button> + </Flex> + {value.length === 0 && ( + <Text size="1" color="gray"> + No messages defined + </Text> + )} + {internal.map((msg, index) => ( + <Flex + key={msg._id} + direction="column" + gap="2" + className={styles.messageItem} + > + <Flex gap="2" align="center" wrap="wrap"> + <Flex gap="1" align="center"> + <TextField.Root + size="1" + value={msg.role} + onChange={(e) => updateMessage(msg._id, "role", e.target.value)} + placeholder="Role" + style={{ width: 90 }} + /> + <DropdownMenu.Root> + <DropdownMenu.Trigger> + <IconButton size="1" variant="ghost"> + <DropdownIcon /> + </IconButton> + </DropdownMenu.Trigger> + <DropdownMenu.Content> + {COMMON_ROLES.map((role) => ( + <DropdownMenu.Item + key={role} + onSelect={() => updateMessage(msg._id, "role", role)} + > + {role} + </DropdownMenu.Item> + ))} + </DropdownMenu.Content> + </DropdownMenu.Root> + </Flex> + <Flex gap="1" ml="auto"> + <IconButton + size="1" + variant="ghost" + disabled={index === 0} + onClick={() => moveMessage(msg._id, -1)} + > + <ChevronUpIcon /> + </IconButton> + <IconButton + size="1" + variant="ghost" + disabled={index === internal.length - 1} + onClick={() => moveMessage(msg._id, 1)} + > + <ChevronDownIcon /> + </IconButton> + <IconButton + size="1" + variant="ghost" + color="red" + onClick={() => removeMessage(msg._id)} + > + <TrashIcon /> + </IconButton> + </Flex> + </Flex> + <TextArea + value={msg.content} + onChange={(e) => updateMessage(msg._id, "content", e.target.value)} + placeholder="Message content..." 
+ rows={2} + className={styles.messageContent} + /> + </Flex> + ))} + </Flex> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/ModeForm.tsx b/refact-agent/gui/src/features/Customization/components/ModeForm.tsx new file mode 100644 index 000000000..88172e7b6 --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/ModeForm.tsx @@ -0,0 +1,486 @@ +import React, { useState, useCallback, useMemo } from "react"; +import { + Flex, + TextField, + Text, + Switch, + TextArea, + Tabs, + Select, +} from "@radix-ui/themes"; +import { StringListEditor } from "./StringListEditor"; +import { RulesTableEditor } from "./RulesTableEditor"; +import { + ConfigPatch, + safeArray, + safeString, + safeBoolean, + safeObject, + isString, + safeToolConfirmRules, +} from "./configUtils"; +import { useGetCapsQuery } from "../../../services/refact/caps"; +import { useCapsForToolUse } from "../../../hooks"; +import { enrichAndGroupModels } from "../../../utils/enrichModels"; +import { RichModelSelectItem } from "../../../components/Select/RichModelSelectItem"; +import { + ModelSamplingParams, + type SamplingValues, +} from "../../../components/ModelSamplingParams"; +import styles from "./editors.module.css"; +import selectStyles from "../../../components/Select/select.module.css"; + +type ModeFormProps = { + config: Record<string, unknown>; + onPatch: (patch: ConfigPatch) => void; + availableTools?: string[]; +}; + +type ModelTypeSectionProps = { + title: string; + typeKey: "default" | "light" | "thinking"; + config: Record<string, unknown>; + groupedModels: ReturnType<typeof enrichAndGroupModels>; + onPatch: (path: (string | number)[], value: unknown) => void; +}; + +const ModelTypeSection: React.FC<ModelTypeSectionProps> = ({ + title, + typeKey, + config, + groupedModels, + onPatch, +}) => { + const model = safeString(config.model); + const toolChoice = + typeof config.tool_choice === "string" ? config.tool_choice : ""; + const parallelToolCalls = + typeof config.parallel_tool_calls === "boolean" + ? config.parallel_tool_calls + : false; + + const basePath = useMemo( + () => ["model_defaults", typeKey] as const, + [typeKey], + ); + + const samplingValues: SamplingValues = useMemo( + () => ({ + temperature: + typeof config.temperature === "number" ? config.temperature : undefined, + max_new_tokens: + typeof config.max_new_tokens === "number" + ? config.max_new_tokens + : undefined, + top_p: typeof config.top_p === "number" ? config.top_p : undefined, + boost_reasoning: + typeof config.boost_reasoning === "boolean" + ? config.boost_reasoning + : undefined, + reasoning_effort: + typeof config.reasoning_effort === "string" + ? config.reasoning_effort + : undefined, + thinking_budget: + typeof config.thinking_budget === "number" + ? config.thinking_budget + : undefined, + }), + [config], + ); + + const handleSamplingChange = useCallback( + <K extends keyof SamplingValues>(field: K, value: SamplingValues[K]) => { + onPatch([...basePath, field], value); + }, + [onPatch, basePath], + ); + + return ( + <Flex + direction="column" + gap="2" + p="2" + style={{ + border: "1px solid var(--gray-6)", + borderRadius: "var(--radius-2)", + }} + > + <Text size="1" weight="medium"> + {title} + </Text> + <Flex direction="column" gap="1"> + <Text size="1" color="gray"> + Model + </Text> + <Select.Root + value={model || "__inherit__"} + onValueChange={(v) => + onPatch([...basePath, "model"], v === "__inherit__" ? 
undefined : v) + } + size="1" + > + <Select.Trigger + placeholder="Inherit from global" + style={{ width: "100%" }} + /> + <Select.Content position="popper"> + <Select.Item value="__inherit__"> + <Text color="gray">Inherit from global</Text> + </Select.Item> + <Select.Separator /> + {groupedModels.map((group) => ( + <Select.Group key={group.provider}> + <Select.Label>{group.displayName}</Select.Label> + {group.models.map((m) => ( + <Select.Item + key={m.value} + value={m.value} + textValue={m.value} + > + <span className={selectStyles.trigger_only}>{m.value}</span> + <span className={selectStyles.dropdown_only}> + <RichModelSelectItem + displayName={m.value} + pricing={m.pricing} + nCtx={m.nCtx} + capabilities={m.capabilities} + isDefault={m.isDefault} + isThinking={m.isThinking} + isLight={m.isLight} + /> + </span> + </Select.Item> + ))} + </Select.Group> + ))} + </Select.Content> + </Select.Root> + </Flex> + + <ModelSamplingParams + model={model || undefined} + values={samplingValues} + onChange={handleSamplingChange} + /> + + <Flex gap="2" wrap="wrap" align="center"> + <Flex align="center" gap="1"> + <Switch + size="1" + checked={parallelToolCalls} + onCheckedChange={(c) => + onPatch([...basePath, "parallel_tool_calls"], c || undefined) + } + /> + <Text size="1">Parallel Tool Calls</Text> + </Flex> + <Flex direction="column" gap="1" style={{ flex: 1, minWidth: 80 }}> + <Text size="1" color="gray"> + Tool Choice + </Text> + <TextField.Root + size="1" + value={toolChoice} + placeholder="auto/none" + onChange={(e) => + onPatch([...basePath, "tool_choice"], e.target.value || undefined) + } + /> + </Flex> + </Flex> + </Flex> + ); +}; + +export const ModeForm: React.FC<ModeFormProps> = ({ + config, + onPatch, + availableTools = [], +}) => { + const [activeTab, setActiveTab] = useState("basic"); + + const title = safeString(config.title); + const description = safeString(config.description); + const specific = safeBoolean(config.specific); + const prompt = safeString(config.prompt); + const tools = safeArray(config.tools, isString); + const modelDefaults = safeObject(config.model_defaults); + const modelDefaultsDefault = safeObject(modelDefaults.default); + const modelDefaultsLight = safeObject(modelDefaults.light); + const modelDefaultsThinking = safeObject(modelDefaults.thinking); + const toolConfirmObj = safeObject(config.tool_confirm); + const toolConfirmRules = safeToolConfirmRules(toolConfirmObj.rules); + const threadDefaults = safeObject(config.thread_defaults); + const ui = safeObject(config.ui); + const base = typeof config.base === "string" ? config.base : undefined; + const matchModels = Array.isArray(config.match_models) + ? 
safeArray(config.match_models, isString) + : undefined; + + const patch = useCallback( + (path: (string | number)[], value: unknown) => { + onPatch({ path, value }); + }, + [onPatch], + ); + + const { data: capsData } = useGetCapsQuery(undefined); + const capsForToolUse = useCapsForToolUse(); + + // Use the same filtered model list as the main chat selector + const groupedModels = useMemo(() => { + return enrichAndGroupModels(capsForToolUse.usableModelsForPlan, capsData); + }, [capsForToolUse.usableModelsForPlan, capsData]); + + return ( + <Tabs.Root + value={activeTab} + onValueChange={setActiveTab} + style={{ + display: "flex", + flexDirection: "column", + flex: 1, + minHeight: 0, + }} + > + <Tabs.List> + <Tabs.Trigger value="basic">Basic</Tabs.Trigger> + <Tabs.Trigger value="tools">Tools</Tabs.Trigger> + <Tabs.Trigger value="llm">LLM Settings</Tabs.Trigger> + <Tabs.Trigger value="advanced">Advanced</Tabs.Trigger> + </Tabs.List> + + {activeTab === "basic" && ( + <div className={styles.formTabContentExpanding}> + <Flex direction="column" gap="3" style={{ flexShrink: 0 }}> + <Flex direction="column" gap="1"> + <Text size="1" weight="medium"> + Title + </Text> + <TextField.Root + size="1" + value={title} + onChange={(e) => patch(["title"], e.target.value)} + placeholder="Display name" + /> + </Flex> + + <Flex direction="column" gap="1"> + <Text size="1" weight="medium"> + Description + </Text> + <TextField.Root + size="1" + value={description} + onChange={(e) => patch(["description"], e.target.value)} + placeholder="Brief description" + /> + </Flex> + + <Flex align="center" gap="2"> + <Switch + size="1" + checked={specific} + onCheckedChange={(checked) => patch(["specific"], checked)} + /> + <Text size="1">Internal Only</Text> + <Text size="1" color="gray"> + (hide from mode selector) + </Text> + </Flex> + </Flex> + + <div className={styles.expandingField}> + <Text size="1" weight="medium"> + System Prompt + </Text> + <TextArea + value={prompt} + onChange={(e) => patch(["prompt"], e.target.value)} + placeholder="System prompt for this mode..." + className={styles.promptTextareaExpand} + /> + <Text size="1" color="gray"> + Supports: %PROJECT_TREE%, %WORKSPACE_INFO%, %ARGS%, etc. + </Text> + </div> + </div> + )} + + {activeTab === "tools" && ( + <div className={styles.formTabContent}> + <StringListEditor + value={tools} + onChange={(t) => patch(["tools"], t)} + label="Available Tools" + placeholder="Add tool..." 
+ suggestions={availableTools} + /> + + <RulesTableEditor + value={toolConfirmRules} + onChange={(rules) => patch(["tool_confirm", "rules"], rules)} + label="Tool Confirmation Rules" + /> + </div> + )} + + {activeTab === "llm" && ( + <div className={styles.formTabContent}> + <Flex direction="column" gap="3"> + <ModelTypeSection + title="Default Model" + typeKey="default" + config={modelDefaultsDefault} + groupedModels={groupedModels} + onPatch={patch} + /> + <ModelTypeSection + title="Light Model" + typeKey="light" + config={modelDefaultsLight} + groupedModels={groupedModels} + onPatch={patch} + /> + <ModelTypeSection + title="Thinking Model" + typeKey="thinking" + config={modelDefaultsThinking} + groupedModels={groupedModels} + onPatch={patch} + /> + </Flex> + </div> + )} + + {activeTab === "advanced" && ( + <div className={styles.formTabContent}> + <Flex direction="column" gap="2"> + <Text size="1" weight="medium"> + Thread Defaults + </Text> + <Flex gap="3" wrap="wrap"> + <Flex align="center" gap="1"> + <Switch + size="1" + checked={ + typeof threadDefaults.include_project_info === "boolean" + ? threadDefaults.include_project_info + : false + } + onCheckedChange={(checked) => + patch( + ["thread_defaults", "include_project_info"], + checked || undefined, + ) + } + /> + <Text size="1">Project Info</Text> + </Flex> + <Flex align="center" gap="1"> + <Switch + size="1" + checked={ + typeof threadDefaults.checkpoints_enabled === "boolean" + ? threadDefaults.checkpoints_enabled + : false + } + onCheckedChange={(checked) => + patch( + ["thread_defaults", "checkpoints_enabled"], + checked || undefined, + ) + } + /> + <Text size="1">Checkpoints</Text> + </Flex> + <Flex align="center" gap="1"> + <Switch + size="1" + checked={ + typeof threadDefaults.auto_approve_editing_tools === + "boolean" + ? threadDefaults.auto_approve_editing_tools + : false + } + onCheckedChange={(checked) => + patch( + ["thread_defaults", "auto_approve_editing_tools"], + checked || undefined, + ) + } + /> + <Text size="1">Auto Approve Editing</Text> + </Flex> + <Flex align="center" gap="1"> + <Switch + size="1" + checked={ + typeof threadDefaults.auto_approve_dangerous_commands === + "boolean" + ? threadDefaults.auto_approve_dangerous_commands + : false + } + onCheckedChange={(checked) => + patch( + ["thread_defaults", "auto_approve_dangerous_commands"], + checked || undefined, + ) + } + /> + <Text size="1">Auto Approve Dangerous</Text> + </Flex> + </Flex> + </Flex> + + <Flex direction="column" gap="1"> + <Text size="1" weight="medium"> + Base Mode + </Text> + <TextField.Root + size="1" + value={base ?? ""} + onChange={(e) => patch(["base"], e.target.value || undefined)} + placeholder="Inherit from (e.g., agent)" + /> + </Flex> + + <StringListEditor + value={matchModels ?? []} + onChange={(models) => + patch(["match_models"], models.length > 0 ? models : undefined) + } + label="Match Models" + placeholder="Model pattern..." + /> + + <Flex gap="2" wrap="wrap"> + <Flex direction="column" gap="1" style={{ minWidth: 80 }}> + <Text size="1">UI Order</Text> + <TextField.Root + size="1" + type="number" + value={typeof ui.order === "number" ? ui.order.toString() : ""} + onChange={(e) => + patch( + ["ui", "order"], + e.target.value ? parseInt(e.target.value, 10) : undefined, + ) + } + placeholder="Order" + /> + </Flex> + </Flex> + + <StringListEditor + value={safeArray(ui.tags, isString)} + onChange={(tags) => patch(["ui", "tags"], tags)} + label="UI Tags" + placeholder="Add tag..." 
+ /> + </div> + )} + </Tabs.Root> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/RulesTableEditor.tsx b/refact-agent/gui/src/features/Customization/components/RulesTableEditor.tsx new file mode 100644 index 000000000..edde31ee3 --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/RulesTableEditor.tsx @@ -0,0 +1,146 @@ +import React, { useCallback, useState, useEffect } from "react"; +import { + Flex, + Button, + TextField, + IconButton, + Text, + DropdownMenu, +} from "@radix-ui/themes"; +import { PlusIcon, TrashIcon, ChevronDownIcon } from "@radix-ui/react-icons"; + +export type ToolConfirmRule = { + match: string; + action: string; +}; + +type InternalRule = { match: string; action: string; _id: string }; + +type RulesTableEditorProps = { + value: ToolConfirmRule[]; + onChange: (value: ToolConfirmRule[]) => void; + label?: string; +}; + +const COMMON_ACTIONS = ["auto", "allow", "deny", "ask"]; + +let idCounter = 0; +const generateId = () => `rule_${++idCounter}_${Date.now()}`; + +const toInternal = (rules: ToolConfirmRule[]): InternalRule[] => + rules.map((r) => ({ ...r, _id: generateId() })); + +const toExternal = (rules: InternalRule[]): ToolConfirmRule[] => + rules.map(({ _id, ...rest }) => rest); + +export const RulesTableEditor: React.FC<RulesTableEditorProps> = ({ + value, + onChange, + label = "Tool Confirmation Rules", +}) => { + const [internal, setInternal] = useState<InternalRule[]>(() => + toInternal(value), + ); + const valueKey = JSON.stringify(value); + + useEffect(() => { + setInternal(toInternal(value)); + // eslint-disable-next-line react-hooks/exhaustive-deps -- valueKey is derived from value, used for deep comparison + }, [valueKey]); + + const emit = useCallback( + (rules: InternalRule[]) => { + setInternal(rules); + onChange(toExternal(rules)); + }, + [onChange], + ); + + const addRule = useCallback(() => { + emit([...internal, { match: "*", action: "ask", _id: generateId() }]); + }, [internal, emit]); + + const removeRule = useCallback( + (id: string) => { + emit(internal.filter((r) => r._id !== id)); + }, + [internal, emit], + ); + + const updateRule = useCallback( + (id: string, field: "match" | "action", fieldValue: string) => { + emit( + internal.map((r) => (r._id === id ? { ...r, [field]: fieldValue } : r)), + ); + }, + [internal, emit], + ); + + return ( + <Flex direction="column" gap="2"> + <Flex justify="between" align="center"> + <Text size="2" weight="medium"> + {label} + </Text> + <Button size="1" variant="soft" onClick={addRule}> + <PlusIcon /> Add Rule + </Button> + </Flex> + {internal.length === 0 ? 
( + <Text size="1" color="gray"> + No rules defined + </Text> + ) : ( + <Flex direction="column" gap="2"> + {internal.map((rule) => ( + <Flex key={rule._id} gap="2" align="center" wrap="wrap"> + <TextField.Root + size="1" + value={rule.match} + onChange={(e) => updateRule(rule._id, "match", e.target.value)} + placeholder="Pattern (e.g., shell:*)" + style={{ flex: 1, minWidth: 100 }} + /> + <Flex gap="1" align="center"> + <TextField.Root + size="1" + value={rule.action} + onChange={(e) => + updateRule(rule._id, "action", e.target.value) + } + placeholder="Action" + style={{ width: 70 }} + /> + <DropdownMenu.Root> + <DropdownMenu.Trigger> + <IconButton size="1" variant="ghost"> + <ChevronDownIcon /> + </IconButton> + </DropdownMenu.Trigger> + <DropdownMenu.Content> + {COMMON_ACTIONS.map((action) => ( + <DropdownMenu.Item + key={action} + onSelect={() => updateRule(rule._id, "action", action)} + > + {action} + </DropdownMenu.Item> + ))} + </DropdownMenu.Content> + </DropdownMenu.Root> + </Flex> + <IconButton + size="1" + variant="ghost" + color="red" + onClick={() => removeRule(rule._id)} + > + <TrashIcon /> + </IconButton> + </Flex> + ))} + </Flex> + )} + </Flex> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/StringListEditor.tsx b/refact-agent/gui/src/features/Customization/components/StringListEditor.tsx new file mode 100644 index 000000000..258c6feed --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/StringListEditor.tsx @@ -0,0 +1,131 @@ +import React, { useCallback, useState } from "react"; +import { + Flex, + Button, + TextField, + IconButton, + Text, + Badge, +} from "@radix-ui/themes"; +import { PlusIcon, Cross2Icon } from "@radix-ui/react-icons"; +import styles from "./editors.module.css"; + +type StringListEditorProps = { + value: string[]; + onChange: (value: string[]) => void; + label?: string; + placeholder?: string; + suggestions?: string[]; +}; + +export const StringListEditor: React.FC<StringListEditorProps> = ({ + value, + onChange, + label = "Items", + placeholder = "Add item...", + suggestions = [], +}) => { + const [inputValue, setInputValue] = useState(""); + const [showSuggestions, setShowSuggestions] = useState(false); + + const addItem = useCallback( + (item: string) => { + const trimmed = item.trim(); + if (trimmed && !value.includes(trimmed)) { + onChange([...value, trimmed]); + } + setInputValue(""); + setShowSuggestions(false); + }, + [value, onChange], + ); + + const removeItem = useCallback( + (index: number) => { + onChange(value.filter((_, i) => i !== index)); + }, + [value, onChange], + ); + + const handleKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + e.preventDefault(); + addItem(inputValue); + } + }, + [inputValue, addItem], + ); + + const filteredSuggestions = suggestions + .filter( + (s) => + !value.includes(s) && + s.toLowerCase().includes(inputValue.toLowerCase()), + ) + .slice(0, 10); + + return ( + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + {label} + </Text> + <Flex gap="1" wrap="wrap"> + {value.map((item, index) => ( + <Badge + key={index} + size="2" + variant="soft" + className={styles.tagBadge} + > + {item} + <IconButton + size="1" + variant="ghost" + className={styles.tagRemove} + onClick={() => removeItem(index)} + > + <Cross2Icon width={10} height={10} /> + </IconButton> + </Badge> + ))} + </Flex> + <Flex gap="2" style={{ position: "relative" }}> + <TextField.Root + value={inputValue} + onChange={(e) => { + 
setInputValue(e.target.value); + setShowSuggestions(true); + }} + onKeyDown={handleKeyDown} + onFocus={() => setShowSuggestions(true)} + onBlur={() => setTimeout(() => setShowSuggestions(false), 200)} + placeholder={placeholder} + style={{ flex: 1 }} + /> + <Button + size="2" + variant="soft" + onClick={() => addItem(inputValue)} + disabled={!inputValue.trim()} + > + <PlusIcon /> + </Button> + {showSuggestions && filteredSuggestions.length > 0 && ( + <Flex direction="column" className={styles.suggestions}> + {filteredSuggestions.map((suggestion) => ( + <button + key={suggestion} + type="button" + className={styles.suggestionItem} + onMouseDown={() => addItem(suggestion)} + > + {suggestion} + </button> + ))} + </Flex> + )} + </Flex> + </Flex> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/SubagentForm.tsx b/refact-agent/gui/src/features/Customization/components/SubagentForm.tsx new file mode 100644 index 000000000..08d5ccbbd --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/SubagentForm.tsx @@ -0,0 +1,652 @@ +import React, { useState, useCallback, useEffect } from "react"; +import { + Flex, + TextField, + Text, + Switch, + TextArea, + Tabs, + Button, +} from "@radix-ui/themes"; +import { StringListEditor } from "./StringListEditor"; +import { ToolParametersEditor, ToolParameter } from "./ToolParametersEditor"; +import { MessageListEditor } from "./MessageListEditor"; +import { + ConfigPatch, + extractSubagentExtra, + computeExtraPatches, + safeArray, + safeString, + safeBoolean, + safeObject, + isString, + isPlainObject, + sanitizeObject, + safeNumber, + safeMessageArray, + parseIntSafe, + parseFloatSafe, +} from "./configUtils"; +import styles from "./editors.module.css"; + +type SubagentFormProps = { + config: Record<string, unknown>; + onPatch: (patch: ConfigPatch) => void; + availableTools?: string[]; +}; + +export const SubagentForm: React.FC<SubagentFormProps> = ({ + config, + onPatch, + availableTools = [], +}) => { + const [activeTab, setActiveTab] = useState("basic"); + const [extraJson, setExtraJson] = useState(""); + const [extraJsonDirty, setExtraJsonDirty] = useState(false); + const [extraJsonError, setExtraJsonError] = useState<string | null>(null); + + const extra = extractSubagentExtra(config); + const configId = safeString(config.id); + + useEffect(() => { + if (!extraJsonDirty) { + const newExtra = extractSubagentExtra(config); + const newJson = + Object.keys(newExtra).length === 0 + ? "" + : JSON.stringify(newExtra, null, 2); + setExtraJson(newJson); + setExtraJsonError(null); + } + }, [configId, config, extraJsonDirty]); + + const title = safeString(config.title); + const description = safeString(config.description); + const specific = safeBoolean(config.specific); + const exposeAsTool = safeBoolean(config.expose_as_tool); + const hasCode = safeBoolean(config.has_code); + const tools = safeArray(config.tools, isString); + const tool = config.tool !== undefined ? safeObject(config.tool) : undefined; + const subchat = safeObject(config.subchat); + const messages = safeObject(config.messages); + const prompts = safeObject(config.prompts); + const gatherFiles = safeObject(config.gather_files); + const base = typeof config.base === "string" ? config.base : undefined; + const matchModels = Array.isArray(config.match_models) + ? 
safeArray(config.match_models, isString) + : undefined; + + const patch = useCallback( + (path: (string | number)[], value: unknown) => { + onPatch({ path, value }); + }, + [onPatch], + ); + + const handleExtraChange = useCallback((text: string) => { + setExtraJson(text); + setExtraJsonDirty(true); + setExtraJsonError(null); + }, []); + + const applyExtraChanges = useCallback(() => { + try { + const parsed: unknown = extraJson.trim() ? JSON.parse(extraJson) : {}; + if (!isPlainObject(parsed)) { + setExtraJsonError("Extra fields must be a JSON object"); + return; + } + const newExtra = sanitizeObject(parsed) as Record<string, unknown>; + const patches = computeExtraPatches(extra, newExtra); + for (const p of patches) { + onPatch(p); + } + setExtraJsonDirty(false); + setExtraJsonError(null); + } catch (e) { + setExtraJsonError(e instanceof Error ? e.message : "Invalid JSON"); + } + }, [extraJson, extra, onPatch]); + + return ( + <Tabs.Root value={activeTab} onValueChange={setActiveTab}> + <Tabs.List> + <Tabs.Trigger value="basic">Basic</Tabs.Trigger> + <Tabs.Trigger value="tool">Tool Schema</Tabs.Trigger> + <Tabs.Trigger value="subchat">Subchat</Tabs.Trigger> + <Tabs.Trigger value="messages">Messages</Tabs.Trigger> + <Tabs.Trigger value="advanced">Advanced</Tabs.Trigger> + </Tabs.List> + + <Flex direction="column" gap="4" pt="4"> + {activeTab === "basic" && ( + <BasicTab + title={title} + description={description} + specific={specific} + exposeAsTool={exposeAsTool} + hasCode={hasCode} + tools={tools} + patch={patch} + availableTools={availableTools} + /> + )} + {activeTab === "tool" && <ToolTab tool={tool} patch={patch} />} + {activeTab === "subchat" && ( + <SubchatTab subchat={subchat} patch={patch} /> + )} + {activeTab === "messages" && ( + <MessagesTab messages={messages} prompts={prompts} patch={patch} /> + )} + {activeTab === "advanced" && ( + <AdvancedTab + base={base} + matchModels={matchModels} + gatherFiles={gatherFiles} + extraJson={extraJson} + extraJsonDirty={extraJsonDirty} + extraJsonError={extraJsonError} + onExtraChange={handleExtraChange} + onExtraApply={applyExtraChanges} + patch={patch} + /> + )} + </Flex> + </Tabs.Root> + ); +}; + +type PatchFn = (path: (string | number)[], value: unknown) => void; + +const BasicTab: React.FC<{ + title: string; + description: string; + specific: boolean; + exposeAsTool: boolean; + hasCode: boolean; + tools: string[]; + patch: PatchFn; + availableTools: string[]; +}> = ({ + title, + description, + specific, + exposeAsTool, + hasCode, + tools, + patch, + availableTools, +}) => ( + <> + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + Title + </Text> + <TextField.Root + value={title} + onChange={(e) => patch(["title"], e.target.value)} + placeholder="Display name" + /> + </Flex> + + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + Description + </Text> + <TextArea + value={description} + onChange={(e) => patch(["description"], e.target.value)} + placeholder="What this subagent does..." 
+ rows={2} + /> + </Flex> + + <Flex gap="4" wrap="wrap"> + <Flex align="center" gap="2"> + <Switch + checked={specific} + onCheckedChange={(c) => patch(["specific"], c)} + /> + <Text size="2">Internal Only</Text> + </Flex> + <Flex align="center" gap="2"> + <Switch + checked={exposeAsTool} + onCheckedChange={(c) => patch(["expose_as_tool"], c)} + /> + <Text size="2">Expose as Tool</Text> + </Flex> + <Flex align="center" gap="2"> + <Switch + checked={hasCode} + onCheckedChange={(c) => patch(["has_code"], c)} + /> + <Text size="2">Has Code</Text> + </Flex> + </Flex> + + <StringListEditor + value={tools} + onChange={(t) => patch(["tools"], t)} + label="Available Tools" + placeholder="Add tool..." + suggestions={availableTools} + /> + </> +); + +const ToolTab: React.FC<{ + tool: Record<string, unknown> | undefined; + patch: PatchFn; +}> = ({ tool, patch }) => { + const hasTool = tool !== undefined; + const toolDesc = + typeof tool?.description === "string" ? tool.description : ""; + const agentic = typeof tool?.agentic === "boolean" ? tool.agentic : false; + const allowParallel = + typeof tool?.allow_parallel === "boolean" ? tool.allow_parallel : false; + const parameters = Array.isArray(tool?.parameters) + ? (tool.parameters as ToolParameter[]) + : []; + const required = Array.isArray(tool?.required) + ? (tool.required as string[]) + : []; + + return ( + <> + <Flex align="center" gap="2"> + <Switch + checked={hasTool} + onCheckedChange={(checked) => { + if (checked) { + patch(["tool"], { + description: "", + agentic: false, + allow_parallel: false, + parameters: [], + required: [], + }); + } else { + patch(["tool"], undefined); + } + }} + /> + <Text size="2">Define Custom Tool Schema</Text> + </Flex> + + {hasTool && ( + <> + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + Tool Description + </Text> + <TextArea + value={toolDesc} + onChange={(e) => patch(["tool", "description"], e.target.value)} + placeholder="Description shown to the LLM..." + rows={2} + /> + </Flex> + + <Flex align="center" gap="2"> + <Switch + checked={agentic} + onCheckedChange={(c) => patch(["tool", "agentic"], c)} + /> + <Text size="2">Agentic</Text> + <Text size="1" color="gray"> + (tool can make multiple calls) + </Text> + </Flex> + + <Flex align="center" gap="2"> + <Switch + checked={allowParallel} + onCheckedChange={(c) => patch(["tool", "allow_parallel"], c)} + /> + <Text size="2">Allow Parallel</Text> + <Text size="1" color="gray"> + (tool can run concurrently with other parallel tools) + </Text> + </Flex> + + <ToolParametersEditor + parameters={parameters} + required={required} + onParametersChange={(p) => patch(["tool", "parameters"], p)} + onRequiredChange={(r) => patch(["tool", "required"], r)} + /> + </> + )} + </> + ); +}; + +const SubchatTab: React.FC<{ + subchat: Record<string, unknown>; + patch: PatchFn; +}> = ({ subchat, patch }) => { + return ( + <> + <Flex gap="4"> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + Context Mode + </Text> + <TextField.Root + value={safeString(subchat.context_mode) || "bare"} + onChange={(e) => patch(["subchat", "context_mode"], e.target.value)} + placeholder="bare / full / ..." 
+ /> + </Flex> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + Model + </Text> + <TextField.Root + value={safeString(subchat.model)} + onChange={(e) => + patch(["subchat", "model"], e.target.value || undefined) + } + placeholder="Default" + /> + </Flex> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + Model Type + </Text> + <TextField.Root + value={safeString(subchat.model_type)} + onChange={(e) => + patch(["subchat", "model_type"], e.target.value || undefined) + } + placeholder="Default" + /> + </Flex> + </Flex> + + <Flex align="center" gap="2"> + <Switch + checked={safeBoolean(subchat.stateful)} + onCheckedChange={(c) => patch(["subchat", "stateful"], c)} + /> + <Text size="2">Stateful</Text> + </Flex> + + <Flex gap="4"> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + Max Steps + </Text> + <TextField.Root + type="number" + value={safeNumber(subchat.max_steps)?.toString() ?? ""} + onChange={(e) => + patch(["subchat", "max_steps"], parseIntSafe(e.target.value)) + } + placeholder="Default" + /> + </Flex> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + N Context + </Text> + <TextField.Root + type="number" + value={safeNumber(subchat.n_ctx)?.toString() ?? ""} + onChange={(e) => + patch(["subchat", "n_ctx"], parseIntSafe(e.target.value)) + } + placeholder="Default" + /> + </Flex> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + Max New Tokens + </Text> + <TextField.Root + type="number" + value={safeNumber(subchat.max_new_tokens)?.toString() ?? ""} + onChange={(e) => + patch(["subchat", "max_new_tokens"], parseIntSafe(e.target.value)) + } + placeholder="Default" + /> + </Flex> + </Flex> + + <Flex gap="4"> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + Temperature + </Text> + <TextField.Root + type="number" + step="0.1" + value={safeNumber(subchat.temperature)?.toString() ?? ""} + onChange={(e) => + patch(["subchat", "temperature"], parseFloatSafe(e.target.value)) + } + placeholder="Default" + /> + </Flex> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + Reasoning Effort + </Text> + <TextField.Root + value={safeString(subchat.reasoning_effort)} + onChange={(e) => + patch( + ["subchat", "reasoning_effort"], + e.target.value || undefined, + ) + } + placeholder="low / medium / high / xhigh / max" + /> + </Flex> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="2" weight="medium"> + Tokens for RAG + </Text> + <TextField.Root + type="number" + value={safeNumber(subchat.tokens_for_rag)?.toString() ?? ""} + onChange={(e) => + patch(["subchat", "tokens_for_rag"], parseIntSafe(e.target.value)) + } + placeholder="Default" + /> + </Flex> + </Flex> + </> + ); +}; + +const MessagesTab: React.FC<{ + messages: Record<string, unknown>; + prompts: Record<string, unknown>; + patch: PatchFn; +}> = ({ messages, prompts, patch }) => ( + <> + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + System Prompt + </Text> + <TextArea + value={safeString(messages.system_prompt)} + onChange={(e) => + patch(["messages", "system_prompt"], e.target.value || undefined) + } + placeholder="System prompt..." 
+ className={styles.promptTextarea} + /> + </Flex> + + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + User Template + </Text> + <TextArea + value={safeString(messages.user_template)} + onChange={(e) => + patch(["messages", "user_template"], e.target.value || undefined) + } + placeholder="User message template..." + rows={3} + /> + </Flex> + + <MessageListEditor + value={safeMessageArray(messages.pre_messages)} + onChange={(m) => patch(["messages", "pre_messages"], m)} + label="Pre-Messages" + /> + + <MessageListEditor + value={safeMessageArray(messages.post_messages)} + onChange={(m) => patch(["messages", "post_messages"], m)} + label="Post-Messages" + /> + + <Text size="2" weight="medium" mt="2"> + Prompts + </Text> + {( + [ + "solver", + "reviewer", + "guardrails", + "gather_system", + "gather_retry", + ] as const + ).map((key) => ( + <Flex key={key} direction="column" gap="1"> + <Text size="1" color="gray"> + {key.replace("_", " ")} + </Text> + <TextArea + value={safeString(prompts[key])} + onChange={(e) => patch(["prompts", key], e.target.value || undefined)} + placeholder={`${key} prompt...`} + rows={2} + /> + </Flex> + ))} + </> +); + +const AdvancedTab: React.FC<{ + base: string | undefined; + matchModels: string[] | undefined; + gatherFiles: Record<string, unknown>; + extraJson: string; + extraJsonDirty: boolean; + extraJsonError: string | null; + onExtraChange: (text: string) => void; + onExtraApply: () => void; + patch: PatchFn; +}> = ({ + base, + matchModels, + gatherFiles, + extraJson, + extraJsonDirty, + extraJsonError, + onExtraChange, + onExtraApply, + patch, +}) => { + return ( + <> + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + Base Subagent + </Text> + <TextField.Root + value={base ?? ""} + onChange={(e) => patch(["base"], e.target.value || undefined)} + placeholder="Inherit from another subagent" + /> + </Flex> + + <StringListEditor + value={matchModels ?? []} + onChange={(m) => patch(["match_models"], m.length > 0 ? m : undefined)} + label="Match Models" + placeholder="Model pattern..." + /> + + <Text size="2" weight="medium"> + Gather Files + </Text> + <Flex gap="4"> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="1" color="gray"> + Subagent + </Text> + <TextField.Root + value={safeString(gatherFiles.subagent)} + onChange={(e) => + patch(["gather_files", "subagent"], e.target.value || undefined) + } + placeholder="Subagent name" + /> + </Flex> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="1" color="gray"> + Max Files + </Text> + <TextField.Root + type="number" + value={safeNumber(gatherFiles.max_files)?.toString() ?? ""} + onChange={(e) => + patch(["gather_files", "max_files"], parseIntSafe(e.target.value)) + } + placeholder="Default" + /> + </Flex> + <Flex direction="column" gap="2" style={{ flex: 1 }}> + <Text size="1" color="gray"> + Max Steps + </Text> + <TextField.Root + type="number" + value={safeNumber(gatherFiles.max_steps)?.toString() ?? 
""} + onChange={(e) => + patch(["gather_files", "max_steps"], parseIntSafe(e.target.value)) + } + placeholder="Default" + /> + </Flex> + </Flex> + + <Flex direction="column" gap="2"> + <Flex justify="between" align="center"> + <Text size="2" weight="medium"> + Extra Fields (JSON) + </Text> + {extraJsonDirty && ( + <Button size="1" variant="soft" onClick={onExtraApply}> + Apply + </Button> + )} + </Flex> + <Text size="1" color="gray"> + Unknown/custom fields at top level + </Text> + <TextArea + value={extraJson} + onChange={(e) => onExtraChange(e.target.value)} + placeholder="{}" + className={styles.extraFieldsEditor} + /> + {extraJsonError && ( + <Text size="1" color="red"> + {extraJsonError} + </Text> + )} + </Flex> + </> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/ToolParametersEditor.tsx b/refact-agent/gui/src/features/Customization/components/ToolParametersEditor.tsx new file mode 100644 index 000000000..7e4c8ad11 --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/ToolParametersEditor.tsx @@ -0,0 +1,177 @@ +import React, { useCallback } from "react"; +import { + Flex, + Button, + TextField, + Select, + IconButton, + Text, + Table, + Checkbox, +} from "@radix-ui/themes"; +import { PlusIcon, TrashIcon } from "@radix-ui/react-icons"; + +export type ToolParameter = { + name: string; + type: string; + description: string; + default?: unknown; +}; + +type ToolParametersEditorProps = { + parameters: ToolParameter[]; + required: string[]; + onParametersChange: (value: ToolParameter[]) => void; + onRequiredChange: (value: string[]) => void; + label?: string; +}; + +const PARAM_TYPES = [ + "string", + "integer", + "number", + "boolean", + "array", + "object", +]; + +export const ToolParametersEditor: React.FC<ToolParametersEditorProps> = ({ + parameters, + required, + onParametersChange, + onRequiredChange, + label = "Tool Parameters", +}) => { + const addParameter = useCallback(() => { + onParametersChange([ + ...parameters, + { name: "", type: "string", description: "" }, + ]); + }, [parameters, onParametersChange]); + + const removeParameter = useCallback( + (index: number) => { + const param = parameters[index] as ToolParameter | undefined; + onParametersChange(parameters.filter((_, i) => i !== index)); + if (param !== undefined && required.includes(param.name)) { + onRequiredChange(required.filter((r) => r !== param.name)); + } + }, + [parameters, required, onParametersChange, onRequiredChange], + ); + + const updateParameter = useCallback( + (index: number, field: keyof ToolParameter, value: string) => { + const oldName = parameters[index].name; + const newParams = parameters.map((p, i) => + i === index ? { ...p, [field]: value } : p, + ); + onParametersChange(newParams); + if (field === "name" && required.includes(oldName)) { + onRequiredChange(required.map((r) => (r === oldName ? value : r))); + } + }, + [parameters, required, onParametersChange, onRequiredChange], + ); + + const toggleRequired = useCallback( + (name: string, isRequired: boolean) => { + if (isRequired) { + onRequiredChange([...required, name]); + } else { + onRequiredChange(required.filter((r) => r !== name)); + } + }, + [required, onRequiredChange], + ); + + return ( + <Flex direction="column" gap="2"> + <Flex justify="between" align="center"> + <Text size="2" weight="medium"> + {label} + </Text> + <Button size="1" variant="soft" onClick={addParameter}> + <PlusIcon /> Add Parameter + </Button> + </Flex> + {parameters.length === 0 ? 
( + <Text size="1" color="gray"> + No parameters defined + </Text> + ) : ( + <Table.Root size="1"> + <Table.Header> + <Table.Row> + <Table.ColumnHeaderCell>Name</Table.ColumnHeaderCell> + <Table.ColumnHeaderCell>Type</Table.ColumnHeaderCell> + <Table.ColumnHeaderCell>Description</Table.ColumnHeaderCell> + <Table.ColumnHeaderCell>Required</Table.ColumnHeaderCell> + <Table.ColumnHeaderCell width="60px"></Table.ColumnHeaderCell> + </Table.Row> + </Table.Header> + <Table.Body> + {parameters.map((param, index) => ( + <Table.Row key={index}> + <Table.Cell> + <TextField.Root + size="1" + value={param.name} + onChange={(e) => + updateParameter(index, "name", e.target.value) + } + placeholder="param_name" + /> + </Table.Cell> + <Table.Cell> + <Select.Root + value={param.type} + onValueChange={(v) => updateParameter(index, "type", v)} + > + <Select.Trigger /> + <Select.Content> + {PARAM_TYPES.map((t) => ( + <Select.Item key={t} value={t}> + {t} + </Select.Item> + ))} + </Select.Content> + </Select.Root> + </Table.Cell> + <Table.Cell> + <TextField.Root + size="1" + value={param.description} + onChange={(e) => + updateParameter(index, "description", e.target.value) + } + placeholder="Description" + /> + </Table.Cell> + <Table.Cell> + <Checkbox + checked={required.includes(param.name)} + disabled={!param.name} + onCheckedChange={(checked) => + toggleRequired(param.name, checked === true) + } + /> + </Table.Cell> + <Table.Cell> + <IconButton + size="1" + variant="ghost" + color="red" + onClick={() => removeParameter(index)} + > + <TrashIcon /> + </IconButton> + </Table.Cell> + </Table.Row> + ))} + </Table.Body> + </Table.Root> + )} + </Flex> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/ToolboxCommandForm.tsx b/refact-agent/gui/src/features/Customization/components/ToolboxCommandForm.tsx new file mode 100644 index 000000000..7a19af42d --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/ToolboxCommandForm.tsx @@ -0,0 +1,149 @@ +import React, { useCallback } from "react"; +import { Flex, TextField, Text, Switch, TextArea } from "@radix-ui/themes"; +import { MessageListEditor } from "./MessageListEditor"; +import { + ConfigPatch, + safeString, + safeBoolean, + safeMessageArray, + safeSelectionRange, +} from "./configUtils"; + +type ToolboxCommandFormProps = { + config: Record<string, unknown>; + onPatch: (patch: ConfigPatch) => void; +}; + +export const ToolboxCommandForm: React.FC<ToolboxCommandFormProps> = ({ + config, + onPatch, +}) => { + const description = safeString(config.description); + const selectionNeeded = safeSelectionRange(config.selection_needed); + const selectionUnwanted = safeBoolean(config.selection_unwanted); + const insertAtCursor = safeBoolean(config.insert_at_cursor); + const messages = safeMessageArray(config.messages); + + const hasSelectionRange = selectionNeeded !== null; + const selectionMin = hasSelectionRange ? selectionNeeded[0] : 0; + const selectionMax = hasSelectionRange ? selectionNeeded[1] : 0; + + const patch = useCallback( + (path: (string | number)[], value: unknown) => { + onPatch({ path, value }); + }, + [onPatch], + ); + + return ( + <Flex direction="column" gap="4"> + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + Description + </Text> + <TextArea + value={description} + onChange={(e) => patch(["description"], e.target.value)} + placeholder="What this command does..." 
+ rows={2} + /> + </Flex> + + <Flex direction="column" gap="3"> + <Text size="2" weight="medium"> + Selection Requirements + </Text> + + <Flex align="center" gap="2"> + <Switch + checked={hasSelectionRange} + onCheckedChange={(checked) => { + if (checked) { + patch(["selection_needed"], [1, 10000]); + patch(["selection_unwanted"], false); + } else { + patch(["selection_needed"], undefined); + } + }} + /> + <Text size="2">Require Selection</Text> + </Flex> + + {hasSelectionRange && ( + <Flex gap="3" align="center"> + <Flex direction="column" gap="1"> + <Text size="1" color="gray"> + Min chars + </Text> + <TextField.Root + type="number" + value={selectionMin.toString()} + onChange={(e) => { + const val = + e.target.value === "" + ? undefined + : parseInt(e.target.value); + if (val !== undefined) { + patch(["selection_needed"], [val, selectionMax]); + } + }} + style={{ width: 100 }} + /> + </Flex> + <Flex direction="column" gap="1"> + <Text size="1" color="gray"> + Max chars + </Text> + <TextField.Root + type="number" + value={selectionMax.toString()} + onChange={(e) => { + const val = + e.target.value === "" + ? undefined + : parseInt(e.target.value); + if (val !== undefined) { + patch(["selection_needed"], [selectionMin, val]); + } + }} + style={{ width: 100 }} + /> + </Flex> + </Flex> + )} + + {!hasSelectionRange && ( + <Flex align="center" gap="2"> + <Switch + checked={selectionUnwanted} + onCheckedChange={(checked) => + patch(["selection_unwanted"], checked) + } + /> + <Text size="2">Selection Unwanted</Text> + <Text size="1" color="gray"> + (hide command when text is selected) + </Text> + </Flex> + )} + </Flex> + + <Flex align="center" gap="2"> + <Switch + checked={insertAtCursor} + onCheckedChange={(checked) => patch(["insert_at_cursor"], checked)} + /> + <Text size="2">Insert at Cursor</Text> + <Text size="1" color="gray"> + (insert response at cursor position) + </Text> + </Flex> + + <MessageListEditor + value={messages} + onChange={(msgs) => patch(["messages"], msgs)} + label="Messages" + /> + </Flex> + ); +}; diff --git a/refact-agent/gui/src/features/Customization/components/configUtils.test.ts b/refact-agent/gui/src/features/Customization/components/configUtils.test.ts new file mode 100644 index 000000000..76cec20bf --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/configUtils.test.ts @@ -0,0 +1,423 @@ +import { describe, it, expect } from "vitest"; +import { + applyPatch, + applyPatches, + isPlainObject, + sanitizeObject, + safeString, + safeBoolean, + safeNumber, + safeArray, + safeObject, + safeMessageArray, + safeSelectionRange, + safeToolConfirmRules, + parseIntSafe, + parseFloatSafe, + validateConfigId, + extractSubagentExtra, + computeExtraPatches, + isMessageTemplate, + isToolConfirmRule, +} from "./configUtils"; + +describe("applyPatch", () => { + it("sets a top-level field", () => { + const obj = { a: 1 }; + const result = applyPatch(obj, { path: ["b"], value: 2 }); + expect(result).toEqual({ a: 1, b: 2 }); + }); + + it("sets a nested field", () => { + const obj = { a: { b: 1 } }; + const result = applyPatch(obj, { path: ["a", "c"], value: 2 }); + expect(result).toEqual({ a: { b: 1, c: 2 } }); + }); + + it("creates intermediate objects", () => { + const obj = {}; + const result = applyPatch(obj, { path: ["a", "b", "c"], value: 1 }); + expect(result).toEqual({ a: { b: { c: 1 } } }); + }); + + it("creates intermediate arrays for numeric keys", () => { + const obj = {}; + const result = applyPatch(obj, { path: ["items", 0], value: "first" }); + 
expect(result).toEqual({ items: ["first"] }); + }); + + it("deletes field when value is undefined", () => { + const obj = { a: 1, b: 2 }; + const result = applyPatch(obj, { path: ["a"], value: undefined }); + expect(result).toEqual({ b: 2 }); + }); + + it("blocks __proto__ in path", () => { + const obj = { a: 1 }; + const result = applyPatch(obj, { + path: ["__proto__", "polluted"], + value: true, + }); + expect(result).toEqual({ a: 1 }); + expect(Object.prototype).not.toHaveProperty("polluted"); + }); + + it("blocks constructor in path", () => { + const obj = { a: 1 }; + const result = applyPatch(obj, { path: ["constructor"], value: "bad" }); + expect(result).toEqual({ a: 1 }); + }); + + it("blocks prototype in path", () => { + const obj = { a: 1 }; + const result = applyPatch(obj, { path: ["prototype"], value: "bad" }); + expect(result).toEqual({ a: 1 }); + }); + + it("handles array updates correctly", () => { + const obj = { items: ["a", "b", "c"] }; + const result = applyPatch(obj, { path: ["items", 1], value: "updated" }); + expect(result).toEqual({ items: ["a", "updated", "c"] }); + }); + + it("does not mutate original object", () => { + const obj = { a: { b: 1 } }; + const result = applyPatch(obj, { path: ["a", "b"], value: 2 }); + expect(obj.a.b).toBe(1); + expect((result.a as { b: number }).b).toBe(2); + }); +}); + +describe("applyPatches", () => { + it("applies multiple patches in order", () => { + const obj = { a: 1 }; + const result = applyPatches(obj, [ + { path: ["b"], value: 2 }, + { path: ["c"], value: 3 }, + { path: ["a"], value: 10 }, + ]); + expect(result).toEqual({ a: 10, b: 2, c: 3 }); + }); +}); + +describe("sanitizeObject", () => { + it("removes dangerous keys", () => { + const obj = { a: 1, __proto__: "bad", constructor: "bad" }; + const result = sanitizeObject(obj); + expect(result).toEqual({ a: 1 }); + }); + + it("sanitizes nested objects", () => { + const obj = { a: { __proto__: "bad", b: 1 } }; + const result = sanitizeObject(obj) as Record<string, unknown>; + expect(result).toEqual({ a: { b: 1 } }); + }); + + it("sanitizes arrays", () => { + const arr = [{ __proto__: "bad", a: 1 }, { b: 2 }]; + const result = sanitizeObject(arr); + expect(result).toEqual([{ a: 1 }, { b: 2 }]); + }); + + it("passes through primitives", () => { + expect(sanitizeObject("string")).toBe("string"); + expect(sanitizeObject(123)).toBe(123); + expect(sanitizeObject(null)).toBe(null); + }); +}); + +describe("isPlainObject", () => { + it("returns true for plain objects", () => { + expect(isPlainObject({})).toBe(true); + expect(isPlainObject({ a: 1 })).toBe(true); + }); + + it("returns false for arrays", () => { + expect(isPlainObject([])).toBe(false); + }); + + it("returns false for null", () => { + expect(isPlainObject(null)).toBe(false); + }); + + it("returns false for primitives", () => { + expect(isPlainObject("string")).toBe(false); + expect(isPlainObject(123)).toBe(false); + }); +}); + +describe("safe type guards", () => { + describe("safeString", () => { + it("returns string for string input", () => { + expect(safeString("hello")).toBe("hello"); + }); + + it("returns empty string for non-string", () => { + expect(safeString(123)).toBe(""); + expect(safeString(null)).toBe(""); + expect(safeString(undefined)).toBe(""); + expect(safeString({})).toBe(""); + }); + }); + + describe("safeBoolean", () => { + it("returns boolean for boolean input", () => { + expect(safeBoolean(true)).toBe(true); + expect(safeBoolean(false)).toBe(false); + }); + + it("returns false for non-boolean", () => { 
+ expect(safeBoolean("true")).toBe(false); + expect(safeBoolean(1)).toBe(false); + expect(safeBoolean(null)).toBe(false); + }); + }); + + describe("safeNumber", () => { + it("returns number for valid number", () => { + expect(safeNumber(42)).toBe(42); + expect(safeNumber(3.14)).toBe(3.14); + expect(safeNumber(0)).toBe(0); + }); + + it("returns undefined for non-number", () => { + expect(safeNumber("42")).toBeUndefined(); + expect(safeNumber(NaN)).toBeUndefined(); + expect(safeNumber(Infinity)).toBeUndefined(); + expect(safeNumber(null)).toBeUndefined(); + }); + }); + + describe("safeArray", () => { + it("filters array with guard", () => { + const isNum = (v: unknown): v is number => typeof v === "number"; + expect(safeArray([1, "a", 2, null, 3], isNum)).toEqual([1, 2, 3]); + }); + + it("returns empty array for non-array", () => { + const isNum = (v: unknown): v is number => typeof v === "number"; + expect(safeArray("not array", isNum)).toEqual([]); + expect(safeArray(null, isNum)).toEqual([]); + }); + }); + + describe("safeObject", () => { + it("returns object for plain object", () => { + expect(safeObject({ a: 1 })).toEqual({ a: 1 }); + }); + + it("returns empty object for non-object", () => { + expect(safeObject(null)).toEqual({}); + expect(safeObject([])).toEqual({}); + expect(safeObject("string")).toEqual({}); + }); + }); +}); + +describe("safeMessageArray", () => { + it("filters valid messages", () => { + const input = [ + { role: "user", content: "hello" }, + { role: 123, content: "bad" }, + { role: "assistant", content: "hi" }, + "not an object", + { role: "system" }, + ]; + expect(safeMessageArray(input)).toEqual([ + { role: "user", content: "hello" }, + { role: "assistant", content: "hi" }, + ]); + }); + + it("returns empty array for non-array", () => { + expect(safeMessageArray(null)).toEqual([]); + expect(safeMessageArray("string")).toEqual([]); + }); +}); + +describe("safeSelectionRange", () => { + it("returns tuple for valid range", () => { + expect(safeSelectionRange([1, 100])).toEqual([1, 100]); + expect(safeSelectionRange([0, 0])).toEqual([0, 0]); + }); + + it("returns null for invalid input", () => { + expect(safeSelectionRange(null)).toBeNull(); + expect(safeSelectionRange([1])).toBeNull(); + expect(safeSelectionRange([1, 2, 3])).toBeNull(); + expect(safeSelectionRange(["a", "b"])).toBeNull(); + expect(safeSelectionRange([1, NaN])).toBeNull(); + }); +}); + +describe("safeToolConfirmRules", () => { + it("filters valid rules", () => { + const input = [ + { match: "tree", action: "auto" }, + { match_pattern: "cat", action: "auto" }, + { match: "shell", action: "ask" }, + "not an object", + ]; + expect(safeToolConfirmRules(input)).toEqual([ + { match: "tree", action: "auto" }, + { match: "shell", action: "ask" }, + ]); + }); + + it("returns empty array for non-array", () => { + expect(safeToolConfirmRules(null)).toEqual([]); + }); +}); + +describe("parseIntSafe", () => { + it("parses valid integers", () => { + expect(parseIntSafe("42")).toBe(42); + expect(parseIntSafe("0")).toBe(0); + expect(parseIntSafe("-10")).toBe(-10); + }); + + it("returns undefined for invalid input", () => { + expect(parseIntSafe("")).toBeUndefined(); + expect(parseIntSafe("abc")).toBeUndefined(); + expect(parseIntSafe("3.14")).toBe(3); + }); +}); + +describe("parseFloatSafe", () => { + it("parses valid floats", () => { + expect(parseFloatSafe("3.14")).toBe(3.14); + expect(parseFloatSafe("42")).toBe(42); + expect(parseFloatSafe("0.5")).toBe(0.5); + }); + + it("returns undefined for invalid input", () 
=> { + expect(parseFloatSafe("")).toBeUndefined(); + expect(parseFloatSafe("abc")).toBeUndefined(); + }); +}); + +describe("validateConfigId", () => { + it("returns null for valid IDs", () => { + expect(validateConfigId("my_mode")).toBeNull(); + expect(validateConfigId("agent")).toBeNull(); + expect(validateConfigId("mode-123")).toBeNull(); + expect(validateConfigId("a")).toBeNull(); + }); + + it("returns error for empty ID", () => { + expect(validateConfigId("")).toBe("ID is required"); + expect(validateConfigId(" ")).toBe("ID is required"); + }); + + it("returns error for path traversal", () => { + expect(validateConfigId("../bad")).toBe("ID contains invalid characters"); + expect(validateConfigId("a/b")).toBe("ID contains invalid characters"); + expect(validateConfigId("a\\b")).toBe("ID contains invalid characters"); + }); + + it("returns error for invalid characters", () => { + expect(validateConfigId("MyMode")).toBe( + "ID must contain only lowercase letters, digits, underscore, or hyphen", + ); + expect(validateConfigId("my mode")).toBe( + "ID must contain only lowercase letters, digits, underscore, or hyphen", + ); + expect(validateConfigId("mode!")).toBe( + "ID must contain only lowercase letters, digits, underscore, or hyphen", + ); + }); +}); + +describe("extractSubagentExtra", () => { + it("extracts unknown keys", () => { + const config = { + id: "test", + title: "Test", + custom_field: "value", + another: 123, + }; + expect(extractSubagentExtra(config)).toEqual({ + custom_field: "value", + another: 123, + }); + }); + + it("excludes known keys", () => { + const config = { + id: "test", + title: "Test", + description: "desc", + tools: ["cat"], + subchat: {}, + }; + expect(extractSubagentExtra(config)).toEqual({}); + }); + + it("excludes dangerous keys", () => { + const config = { + id: "test", + __proto__: "bad", + custom: "ok", + }; + expect(extractSubagentExtra(config)).toEqual({ custom: "ok" }); + }); +}); + +describe("computeExtraPatches", () => { + it("computes patches for added keys", () => { + const oldExtra = {}; + const newExtra = { custom: "value" }; + expect(computeExtraPatches(oldExtra, newExtra)).toEqual([ + { path: ["custom"], value: "value" }, + ]); + }); + + it("computes patches for removed keys", () => { + const oldExtra = { custom: "value" }; + const newExtra = {}; + expect(computeExtraPatches(oldExtra, newExtra)).toEqual([ + { path: ["custom"], value: undefined }, + ]); + }); + + it("computes patches for changed keys", () => { + const oldExtra = { custom: "old" }; + const newExtra = { custom: "new" }; + expect(computeExtraPatches(oldExtra, newExtra)).toEqual([ + { path: ["custom"], value: "new" }, + ]); + }); + + it("ignores unchanged keys", () => { + const oldExtra = { custom: "same" }; + const newExtra = { custom: "same" }; + expect(computeExtraPatches(oldExtra, newExtra)).toEqual([]); + }); +}); + +describe("isMessageTemplate", () => { + it("returns true for valid message", () => { + expect(isMessageTemplate({ role: "user", content: "hello" })).toBe(true); + }); + + it("returns false for invalid message", () => { + expect(isMessageTemplate({ role: 123, content: "hello" })).toBe(false); + expect(isMessageTemplate({ role: "user" })).toBe(false); + expect(isMessageTemplate("string")).toBe(false); + expect(isMessageTemplate(null)).toBe(false); + }); +}); + +describe("isToolConfirmRule", () => { + it("returns true for valid rule", () => { + expect(isToolConfirmRule({ match: "tree", action: "auto" })).toBe(true); + }); + + it("returns false for invalid rule", () 
=> { + expect(isToolConfirmRule({ match_pattern: "tree", action: "auto" })).toBe( + false, + ); + expect(isToolConfirmRule({ match: "tree" })).toBe(false); + expect(isToolConfirmRule("string")).toBe(false); + }); +}); diff --git a/refact-agent/gui/src/features/Customization/components/configUtils.ts b/refact-agent/gui/src/features/Customization/components/configUtils.ts new file mode 100644 index 000000000..2fa97e96b --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/configUtils.ts @@ -0,0 +1,255 @@ +export type ConfigPatch = { + path: (string | number)[]; + value: unknown; +}; + +const DANGEROUS_KEYS = new Set(["__proto__", "constructor", "prototype"]); + +function isDangerousKey(key: string | number): boolean { + return typeof key === "string" && DANGEROUS_KEYS.has(key); +} + +export function applyPatch( + obj: Record<string, unknown>, + patch: ConfigPatch, +): Record<string, unknown> { + if (patch.path.some(isDangerousKey)) { + return obj; + } + + if (patch.path.length === 0) { + if (isPlainObject(patch.value)) { + return sanitizeObject(patch.value) as Record<string, unknown>; + } + return obj; + } + + const result = { ...obj }; + let current: Record<string, unknown> = result; + + for (let i = 0; i < patch.path.length - 1; i++) { + const key = patch.path[i]; + const nextKey = patch.path[i + 1]; + const existing = current[key]; + + if (Array.isArray(existing)) { + current[key] = (existing as unknown[]).slice(); + } else if (isPlainObject(existing)) { + current[key] = { ...existing }; + } else { + current[key] = typeof nextKey === "number" ? [] : {}; + } + current = current[key] as Record<string, unknown>; + } + + const lastKey = patch.path[patch.path.length - 1]; + if (patch.value === undefined) { + Reflect.deleteProperty(current, lastKey); + } else { + current[lastKey] = sanitizeObject(patch.value); + } + + return result; +} + +export function applyPatches( + obj: Record<string, unknown>, + patches: ConfigPatch[], +): Record<string, unknown> { + return patches.reduce((acc, patch) => applyPatch(acc, patch), obj); +} + +export function getNestedValue<T>( + obj: Record<string, unknown>, + path: string[], +): T | undefined { + let current: unknown = obj; + for (const key of path) { + if ( + current === null || + current === undefined || + typeof current !== "object" + ) { + return undefined; + } + current = (current as Record<string, unknown>)[key]; + } + return current as T; +} + +export function isPlainObject( + value: unknown, +): value is Record<string, unknown> { + return ( + typeof value === "object" && + value !== null && + !Array.isArray(value) && + Object.getPrototypeOf(value) === Object.prototype + ); +} + +export function sanitizeObject(obj: unknown): unknown { + if (!isPlainObject(obj)) { + if (Array.isArray(obj)) { + return obj.map(sanitizeObject); + } + return obj; + } + + const result: Record<string, unknown> = {}; + for (const [key, value] of Object.entries(obj)) { + if (key === "__proto__" || key === "constructor" || key === "prototype") { + continue; + } + result[key] = sanitizeObject(value); + } + return result; +} + +const SUBAGENT_KNOWN_KEYS = new Set([ + "schema_version", + "id", + "title", + "description", + "specific", + "expose_as_tool", + "has_code", + "tool", + "subchat", + "messages", + "prompts", + "gather_files", + "tools", + "base", + "match_models", +]); + +export function extractSubagentExtra( + config: Record<string, unknown>, +): Record<string, unknown> { + const extra: Record<string, unknown> = {}; + for (const [key, value] of 
Object.entries(config)) { + if (!SUBAGENT_KNOWN_KEYS.has(key) && !DANGEROUS_KEYS.has(key)) { + extra[key] = value; + } + } + return extra; +} + +export function computeExtraPatches( + oldExtra: Record<string, unknown>, + newExtra: Record<string, unknown>, +): ConfigPatch[] { + const patches: ConfigPatch[] = []; + const allKeys = new Set([...Object.keys(oldExtra), ...Object.keys(newExtra)]); + + for (const key of allKeys) { + if (DANGEROUS_KEYS.has(key) || SUBAGENT_KNOWN_KEYS.has(key)) continue; + + if (!(key in newExtra)) { + patches.push({ path: [key], value: undefined }); + } else if ( + JSON.stringify(oldExtra[key]) !== JSON.stringify(newExtra[key]) + ) { + patches.push({ path: [key], value: newExtra[key] }); + } + } + + return patches; +} + +export function safeArray<T>( + value: unknown, + guard: (v: unknown) => v is T, +): T[] { + if (!Array.isArray(value)) return []; + return value.filter(guard); +} + +export function safeString(value: unknown): string { + return typeof value === "string" ? value : ""; +} + +export function safeBoolean(value: unknown): boolean { + return typeof value === "boolean" ? value : false; +} + +export function safeNumber(value: unknown): number | undefined { + if (typeof value === "number" && Number.isFinite(value)) return value; + return undefined; +} + +export function safeObject(value: unknown): Record<string, unknown> { + return isPlainObject(value) ? value : {}; +} + +export function isString(v: unknown): v is string { + return typeof v === "string"; +} + +export type MessageTemplate = { + role: string; + content: string; +}; + +export function isMessageTemplate(v: unknown): v is MessageTemplate { + return ( + isPlainObject(v) && + typeof v.role === "string" && + typeof v.content === "string" + ); +} + +export function safeMessageArray(value: unknown): MessageTemplate[] { + if (!Array.isArray(value)) return []; + return value.filter(isMessageTemplate); +} + +export function safeSelectionRange(value: unknown): [number, number] | null { + if (!Array.isArray(value) || value.length !== 2) return null; + const min: unknown = value[0]; + const max: unknown = value[1]; + if (typeof min !== "number" || typeof max !== "number") return null; + if (!Number.isFinite(min) || !Number.isFinite(max)) return null; + return [min, max]; +} + +export function parseIntSafe(value: string): number | undefined { + if (!value) return undefined; + const n = Number.parseInt(value, 10); + return Number.isFinite(n) ? n : undefined; +} + +export function parseFloatSafe(value: string): number | undefined { + if (!value) return undefined; + const n = Number.parseFloat(value); + return Number.isFinite(n) ? 
n : undefined; +} + +export function isToolConfirmRule( + v: unknown, +): v is { match: string; action: string } { + return ( + isPlainObject(v) && + typeof v.match === "string" && + typeof v.action === "string" + ); +} + +export function safeToolConfirmRules( + value: unknown, +): { match: string; action: string }[] { + if (!Array.isArray(value)) return []; + return value.filter(isToolConfirmRule); +} + +const ID_PATTERN = /^[a-z0-9_-]+$/; + +export function validateConfigId(id: string): string | null { + if (!id.trim()) return "ID is required"; + if (id.includes("/") || id.includes("\\") || id.includes("..")) + return "ID contains invalid characters"; + if (!ID_PATTERN.test(id)) + return "ID must contain only lowercase letters, digits, underscore, or hyphen"; + return null; +} diff --git a/refact-agent/gui/src/features/Customization/components/editors.module.css b/refact-agent/gui/src/features/Customization/components/editors.module.css new file mode 100644 index 000000000..635343369 --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/editors.module.css @@ -0,0 +1,129 @@ +.messageItem { + padding: var(--space-2); + border: 1px solid var(--gray-6); + border-radius: var(--radius-2); + background-color: var(--gray-2); +} + +.messageContent { + font-family: var(--code-font-family); + font-size: var(--font-size-1); + min-height: 60px; +} + +.tagBadge { + display: inline-flex; + align-items: center; + gap: var(--space-1); + padding-right: var(--space-1); +} + +.tagRemove { + width: 14px; + height: 14px; + min-width: 14px; + min-height: 14px; +} + +.suggestions { + composes: scrollbarThin from "../../../components/shared/scrollbar.module.css"; + position: absolute; + top: 100%; + left: 0; + right: 40px; + max-height: 150px; + overflow-y: auto; + background-color: var(--gray-1); + border: 1px solid var(--gray-6); + border-radius: var(--radius-2); + z-index: 100; + box-shadow: var(--shadow-3); +} + +.suggestionItem { + display: block; + width: 100%; + padding: var(--space-1) var(--space-2); + text-align: left; + background: none; + border: none; + cursor: pointer; + font-size: var(--font-size-1); + color: var(--gray-12); +} + +.suggestionItem:hover { + background-color: var(--accent-a3); +} + +.formSection { + padding: var(--space-2); + border: 1px solid var(--gray-5); + border-radius: var(--radius-2); + background-color: var(--gray-1); +} + +.promptTextarea { + min-height: 100px; + font-family: var(--code-font-family); + font-size: var(--font-size-1); +} + +.promptTextareaExpand { + composes: scrollbarThin from "../../../components/shared/scrollbar.module.css"; + flex: 1; + min-height: 100px; + font-family: var(--code-font-family); + font-size: var(--font-size-1); + resize: none; + overflow-y: auto; +} + +.formTabContent { + composes: scrollbarThin from "../../../components/shared/scrollbar.module.css"; + display: flex; + flex-direction: column; + flex: 1; + min-height: 0; + gap: var(--space-3); + padding-top: var(--space-3); + overflow-y: auto; +} + +.formTabContentExpanding { + display: flex; + flex-direction: column; + flex: 1; + min-height: 0; + gap: var(--space-3); + padding-top: var(--space-3); + overflow: hidden; +} + +.expandingField { + display: flex; + flex-direction: column; + flex: 1; + min-height: 100px; + gap: var(--space-1); + overflow: hidden; +} + +.extraFieldsEditor { + min-height: 80px; + font-family: var(--code-font-family); + font-size: var(--font-size-1); +} + +.compactField { + min-width: 0; +} + +.fieldRow { + flex-wrap: wrap; + gap: var(--space-2); 
+} + +.fieldRow > * { + min-width: 80px; +} diff --git a/refact-agent/gui/src/features/Customization/components/index.ts b/refact-agent/gui/src/features/Customization/components/index.ts new file mode 100644 index 000000000..56c9dab48 --- /dev/null +++ b/refact-agent/gui/src/features/Customization/components/index.ts @@ -0,0 +1,30 @@ +export { MessageListEditor } from "./MessageListEditor"; +export type { MessageTemplate } from "./MessageListEditor"; + +export { StringListEditor } from "./StringListEditor"; + +export { RulesTableEditor } from "./RulesTableEditor"; +export type { ToolConfirmRule } from "./RulesTableEditor"; + +export { ToolParametersEditor } from "./ToolParametersEditor"; +export type { ToolParameter } from "./ToolParametersEditor"; + +export { CodeLensForm } from "./CodeLensForm"; +export { ToolboxCommandForm } from "./ToolboxCommandForm"; +export { ModeForm } from "./ModeForm"; +export { SubagentForm } from "./SubagentForm"; + +export { + applyPatch, + isPlainObject, + sanitizeObject, + extractSubagentExtra, + computeExtraPatches, + safeArray, + safeString, + safeBoolean, + safeNumber, + safeObject, + isString, +} from "./configUtils"; +export type { ConfigPatch } from "./configUtils"; diff --git a/refact-agent/gui/src/features/Customization/index.ts b/refact-agent/gui/src/features/Customization/index.ts new file mode 100644 index 000000000..d01fd88ed --- /dev/null +++ b/refact-agent/gui/src/features/Customization/index.ts @@ -0,0 +1,2 @@ +export { Customization } from "./Customization"; +export type { CustomizationProps } from "./Customization"; diff --git a/refact-agent/gui/src/features/DefaultModels/DefaultModels.module.css b/refact-agent/gui/src/features/DefaultModels/DefaultModels.module.css new file mode 100644 index 000000000..6c3000a71 --- /dev/null +++ b/refact-agent/gui/src/features/DefaultModels/DefaultModels.module.css @@ -0,0 +1,3 @@ +.modelTypeCard { + padding: var(--space-4); +} diff --git a/refact-agent/gui/src/features/DefaultModels/DefaultModels.tsx b/refact-agent/gui/src/features/DefaultModels/DefaultModels.tsx new file mode 100644 index 000000000..5ae8aa096 --- /dev/null +++ b/refact-agent/gui/src/features/DefaultModels/DefaultModels.tsx @@ -0,0 +1,260 @@ +import React, { useState, useCallback, useEffect, useMemo } from "react"; +import { Flex, Button, Text, Card, Heading, Callout } from "@radix-ui/themes"; +import { ArrowLeftIcon, ExclamationTriangleIcon } from "@radix-ui/react-icons"; + +import { ScrollArea } from "../../components/ScrollArea"; +import { PageWrapper } from "../../components/PageWrapper"; +import { Spinner } from "../../components/Spinner"; +import { ModelSelector } from "../../components/Chat/ModelSelector"; +import { + ModelSamplingParams, + type SamplingValues, +} from "../../components/ModelSamplingParams"; + +import { + useGetDefaultsQuery, + useUpdateDefaultsMutation, + type ModelTypeDefaults, + type ProviderDefaults, +} from "../../services/refact/providers"; +import { useGetCapsQuery } from "../../services/refact/caps"; + +import type { Config } from "../Config/configSlice"; + +import styles from "./DefaultModels.module.css"; + +type DefaultModelsProps = { + backFromDefaultModels: () => void; + host: Config["host"]; + tabbed: Config["tabbed"]; +}; + +type ModelTypeKey = "chat" | "chat_light" | "chat_thinking"; + +const MODEL_TYPE_LABELS: Record< + ModelTypeKey, + { title: string; description: string } +> = { + chat: { + title: "Default Chat Model", + description: "The primary model used for chat conversations", + }, + 
chat_light: { + title: "Light Chat Model", + description: "Fast, lightweight model for quick responses and subagents", + }, + chat_thinking: { + title: "Thinking Model", + description: "Reasoning-focused model for complex analysis tasks", + }, +}; + +const ModelTypeSection: React.FC<{ + typeKey: ModelTypeKey; + config: ModelTypeDefaults; + capsDefault: string; + onChange: (key: ModelTypeKey, config: ModelTypeDefaults) => void; +}> = ({ typeKey, config, capsDefault, onChange }) => { + const { title, description } = MODEL_TYPE_LABELS[typeKey]; + + const handleModelChange = useCallback( + (model: string) => { + onChange(typeKey, { ...config, model }); + }, + [typeKey, config, onChange], + ); + + const handleSamplingChange = useCallback( + <K extends keyof SamplingValues>(field: K, value: SamplingValues[K]) => { + onChange(typeKey, { ...config, [field]: value }); + }, + [typeKey, config, onChange], + ); + + const effectiveModel = config.model ?? capsDefault; + + return ( + <Card className={styles.modelTypeCard}> + <Flex direction="column" gap="4"> + <Flex direction="column" gap="1"> + <Heading size="3">{title}</Heading> + <Text size="2" color="gray"> + {description} + </Text> + </Flex> + + <Flex direction="column" gap="2"> + <Text size="2" weight="medium"> + Model + </Text> + <ModelSelector + value={config.model} + onValueChange={handleModelChange} + defaultValue={capsDefault} + showLabel={false} + compact={false} + /> + </Flex> + + <ModelSamplingParams + model={effectiveModel} + values={config} + onChange={handleSamplingChange} + size="2" + /> + </Flex> + </Card> + ); +}; + +export const DefaultModels: React.FC<DefaultModelsProps> = ({ + backFromDefaultModels, + host, + tabbed, +}) => { + const { + data: defaults, + isLoading, + isSuccess, + isError, + refetch, + } = useGetDefaultsQuery(undefined); + const { data: capsData } = useGetCapsQuery(undefined); + const [updateDefaults, { isLoading: isSaving }] = useUpdateDefaultsMutation(); + + const capsDefaults = useMemo( + () => ({ + chat: capsData?.chat_default_model ?? "", + chat_light: capsData?.chat_light_model ?? "", + chat_thinking: capsData?.chat_thinking_model ?? "", + }), + [capsData], + ); + + const [localDefaults, setLocalDefaults] = useState<ProviderDefaults>({ + chat: {}, + chat_light: {}, + chat_thinking: {}, + }); + + const [hasChanges, setHasChanges] = useState(false); + const [saveError, setSaveError] = useState<string | null>(null); + + useEffect(() => { + if (defaults) { + setLocalDefaults(defaults); + setHasChanges(false); + } + }, [defaults]); + + const handleModelTypeChange = useCallback( + (key: ModelTypeKey, config: ModelTypeDefaults) => { + setLocalDefaults((prev) => ({ + ...prev, + [key]: config, + })); + setHasChanges(true); + setSaveError(null); + }, + [], + ); + + const handleSave = useCallback(async () => { + try { + await updateDefaults(localDefaults).unwrap(); + setHasChanges(false); + setSaveError(null); + } catch { + setSaveError("Failed to save defaults. Please try again."); + } + }, [localDefaults, updateDefaults]); + + if (isLoading) { + return <Spinner spinning />; + } + + if (isError || !isSuccess) { + return ( + <PageWrapper host={host}> + <Flex direction="column" gap="4" p="4" align="center" justify="center"> + <Callout.Root color="red"> + <Callout.Icon> + <ExclamationTriangleIcon /> + </Callout.Icon> + <Callout.Text> + Failed to load default models configuration. 
+ </Callout.Text> + </Callout.Root> + <Button onClick={() => void refetch()}>Retry</Button> + <Button variant="outline" onClick={backFromDefaultModels}> + Back + </Button> + </Flex> + </PageWrapper> + ); + } + + return ( + <PageWrapper + host={host} + style={{ + padding: 0, + marginTop: 0, + }} + > + <Flex direction="column" gap="4" p="4" style={{ height: "100%" }}> + <Flex justify="between" align="center"> + {host === "vscode" && !tabbed ? ( + <Button variant="surface" onClick={backFromDefaultModels}> + <ArrowLeftIcon width="16" height="16" /> + Back + </Button> + ) : ( + <Button variant="outline" onClick={backFromDefaultModels}> + Back + </Button> + )} + + <Button + onClick={() => void handleSave()} + disabled={!hasChanges || isSaving} + variant="solid" + > + {isSaving ? "Saving..." : "Save Changes"} + </Button> + </Flex> + + {saveError && ( + <Callout.Root color="red"> + <Callout.Icon> + <ExclamationTriangleIcon /> + </Callout.Icon> + <Callout.Text>{saveError}</Callout.Text> + </Callout.Root> + )} + + <Flex direction="column" gap="2"> + <Heading size="5">Default Models</Heading> + <Text size="2" color="gray"> + Configure which models to use by default for different purposes. + These settings apply globally across all modes. + </Text> + </Flex> + + <ScrollArea scrollbars="vertical" fullHeight> + <Flex direction="column" gap="4" pb="4"> + {(Object.keys(MODEL_TYPE_LABELS) as ModelTypeKey[]).map((key) => ( + <ModelTypeSection + key={key} + typeKey={key} + config={localDefaults[key]} + capsDefault={capsDefaults[key]} + onChange={handleModelTypeChange} + /> + ))} + </Flex> + </ScrollArea> + </Flex> + </PageWrapper> + ); +}; diff --git a/refact-agent/gui/src/features/DefaultModels/index.ts b/refact-agent/gui/src/features/DefaultModels/index.ts new file mode 100644 index 000000000..4d1271df8 --- /dev/null +++ b/refact-agent/gui/src/features/DefaultModels/index.ts @@ -0,0 +1 @@ +export { DefaultModels } from "./DefaultModels"; diff --git a/refact-agent/gui/src/features/Errors/errorsSlice.ts b/refact-agent/gui/src/features/Errors/errorsSlice.ts index eab72101d..d9ac6320f 100644 --- a/refact-agent/gui/src/features/Errors/errorsSlice.ts +++ b/refact-agent/gui/src/features/Errors/errorsSlice.ts @@ -15,7 +15,7 @@ export const errorSlice = createSlice({ initialState, reducers: { setError: (state, action: PayloadAction<string>) => { - if (state.message) return state; + if (state.message) return; state.message = action.payload; if (state.message.includes(BALLANCE_LIMIT_MESSAGES[0])) { state.type = "balance"; @@ -39,37 +39,3 @@ export const errorSlice = createSlice({ export const { setError, setIsAuthError, clearError } = errorSlice.actions; export const { getErrorMessage, getIsAuthError, getErrorType } = errorSlice.selectors; - -// export const errorMiddleware = createListenerMiddleware(); -// const startErrorListening = errorMiddleware.startListening.withTypes< -// RootState, -// AppDispatch -// >(); - -// startErrorListening({ -// // matcher: isAnyOf(chatError, isRejected), -// // TODO: figure out why this breaks the tests when it's not a function :/ -// matcher: isAnyOf(isRejected), -// effect: (action, listenerApi) => { -// if (capsEndpoints.getCaps.matchRejected(action) && !action.meta.condition) { -// const message = `fetching caps from lsp`; -// listenerApi.dispatch(setError(message)); -// } - -// if ( -// promptsEndpoints.getPrompts.matchRejected(action) && -// !action.meta.condition -// ) { -// const message = `fetching system prompts.`; -// listenerApi.dispatch(setError(action.error.message ?? 
message)); -// } - -// if ( -// chatAskQuestionThunk.rejected.match(action) && -// !action.meta.aborted && -// typeof action.payload === "string" -// ) { -// listenerApi.dispatch(setError(action.payload)); -// } -// }, -// }); diff --git a/refact-agent/gui/src/features/Errors/informationSlice.ts b/refact-agent/gui/src/features/Errors/informationSlice.ts index dcfc29a29..2a7761c47 100644 --- a/refact-agent/gui/src/features/Errors/informationSlice.ts +++ b/refact-agent/gui/src/features/Errors/informationSlice.ts @@ -1,6 +1,6 @@ import { createSlice, type PayloadAction } from "@reduxjs/toolkit"; -import { chatResponse } from "../Chat"; import { smallCloudApi } from "../../services/smallcloud"; +import { applyChatEvent } from "../Chat/Thread/actions"; export type InformationSliceState = { message: string | null; @@ -18,7 +18,7 @@ export const informationSlice = createSlice({ initialState, reducers: { setInformation: (state, action: PayloadAction<string>) => { - if (state.message) return state; + if (state.message) return; state.message = action.payload; }, clearInformation: (state, _action: PayloadAction) => { @@ -40,38 +40,38 @@ export const informationSlice = createSlice({ }, extraReducers: (builder) => { - builder.addMatcher(chatResponse.match, (state, action) => { + // Listen to SSE events for metering balance updates (addCase must come before addMatcher) + builder.addCase(applyChatEvent, (state, action) => { + const event = action.payload; + // Check for metering_balance in SSE events if ( - state.dismissed && - "metering_balance" in action.payload && - typeof action.payload.metering_balance === "number" && - action.payload.metering_balance > 2000 + "metering_balance" in event && + typeof event.metering_balance === "number" ) { - state.dismissed = false; - } - if (state.dismissed) return state; - if (state.message) return state; - if (!("metering_balance" in action.payload)) return state; - if (typeof action.payload.metering_balance !== "number") return state; - if (action.payload.metering_balance <= 2000) { - state.type = "balance"; - state.message = - "Your account is running low on credits. Please top up your account to continue using the service."; + const balance = event.metering_balance; + if (state.dismissed && balance > 2000) { + state.dismissed = false; + } + if (state.dismissed) return; + if (state.message) return; + if (balance <= 2000) { + state.type = "balance"; + state.message = + "Your account is running low on credits. Please top up your account to continue using the service."; + } } - return state; }); builder.addMatcher( smallCloudApi.endpoints.getUser.matchFulfilled, (state, action) => { - if (state.dismissed) return state; - if (state.message) return state; + if (state.dismissed) return; + if (state.message) return; if (action.payload.metering_balance <= 2000) { state.type = "balance"; state.message = "Your account is running low on credits. 
Please top up your account to continue using the service."; } - return state; }, ); }, diff --git a/refact-agent/gui/src/features/History/historySlice.test.ts b/refact-agent/gui/src/features/History/historySlice.test.ts new file mode 100644 index 000000000..fc70e505b --- /dev/null +++ b/refact-agent/gui/src/features/History/historySlice.test.ts @@ -0,0 +1,421 @@ +import { describe, it, expect } from "vitest"; +import { + getHistoryTree, + HistoryState, + ChatHistoryItem, + historySlice, +} from "./historySlice"; + +function createHistoryItem( + id: string, + title: string, + overrides: Partial<ChatHistoryItem> = {}, +): ChatHistoryItem { + return { + id, + title, + createdAt: "2024-01-01T00:00:00Z", + updatedAt: "2024-01-01T00:00:00Z", + model: "gpt-4", + mode: "AGENT", + tool_use: "agent", + messages: [], + boost_reasoning: false, + include_project_info: true, + increase_max_tokens: false, + + last_user_message_id: "", + ...overrides, + }; +} + +const defaultPagination = { cursor: null, hasMore: true }; + +describe("getHistoryTree", () => { + it("returns empty array for empty state", () => { + const state: HistoryState = { + chats: {}, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + const result = getHistoryTree({ history: state }); + expect(result).toEqual([]); + }); + + it("returns flat list when no parent_id relationships exist", () => { + const state: HistoryState = { + chats: { + chat1: createHistoryItem("chat1", "Chat 1", { + updatedAt: "2024-01-03T00:00:00Z", + }), + chat2: createHistoryItem("chat2", "Chat 2", { + updatedAt: "2024-01-02T00:00:00Z", + }), + chat3: createHistoryItem("chat3", "Chat 3", { + updatedAt: "2024-01-01T00:00:00Z", + }), + }, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + + const result = getHistoryTree({ history: state }); + + expect(result).toHaveLength(3); + expect(result[0].id).toBe("chat1"); + expect(result[1].id).toBe("chat2"); + expect(result[2].id).toBe("chat3"); + expect(result[0].children).toEqual([]); + }); + + it("builds tree structure with parent_id relationships", () => { + const state: HistoryState = { + chats: { + parent: createHistoryItem("parent", "Parent Chat", { + updatedAt: "2024-01-03T00:00:00Z", + }), + child1: createHistoryItem("child1", "Child 1", { + updatedAt: "2024-01-02T00:00:00Z", + parent_id: "parent", + }), + child2: createHistoryItem("child2", "Child 2", { + updatedAt: "2024-01-01T00:00:00Z", + parent_id: "parent", + }), + }, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + + const result = getHistoryTree({ history: state }); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("parent"); + expect(result[0].children).toHaveLength(2); + expect(result[0].children[0].id).toBe("child1"); + expect(result[0].children[1].id).toBe("child2"); + }); + + it("handles nested tree structure", () => { + const state: HistoryState = { + chats: { + root: createHistoryItem("root", "Root", { + updatedAt: "2024-01-04T00:00:00Z", + }), + level1: createHistoryItem("level1", "Level 1", { + updatedAt: "2024-01-03T00:00:00Z", + parent_id: "root", + }), + level2: createHistoryItem("level2", "Level 2", { + updatedAt: "2024-01-02T00:00:00Z", + parent_id: "level1", + }), + }, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + + const result = getHistoryTree({ history: state }); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("root"); + expect(result[0].children).toHaveLength(1); + 
expect(result[0].children[0].id).toBe("level1"); + expect(result[0].children[0].children).toHaveLength(1); + expect(result[0].children[0].children[0].id).toBe("level2"); + }); + + it("treats items with missing parent as roots", () => { + const state: HistoryState = { + chats: { + orphan: createHistoryItem("orphan", "Orphan", { + updatedAt: "2024-01-02T00:00:00Z", + parent_id: "nonexistent", + }), + regular: createHistoryItem("regular", "Regular", { + updatedAt: "2024-01-01T00:00:00Z", + }), + }, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + + const result = getHistoryTree({ history: state }); + + expect(result).toHaveLength(2); + expect(result.map((n: { id: string }) => n.id)).toContain("orphan"); + expect(result.map((n: { id: string }) => n.id)).toContain("regular"); + }); + + it("sorts roots and children by updatedAt descending", () => { + const state: HistoryState = { + chats: { + parent: createHistoryItem("parent", "Parent", { + updatedAt: "2024-01-01T00:00:00Z", + }), + child_old: createHistoryItem("child_old", "Old Child", { + updatedAt: "2024-01-01T00:00:00Z", + parent_id: "parent", + }), + child_new: createHistoryItem("child_new", "New Child", { + updatedAt: "2024-01-03T00:00:00Z", + parent_id: "parent", + }), + child_mid: createHistoryItem("child_mid", "Mid Child", { + updatedAt: "2024-01-02T00:00:00Z", + parent_id: "parent", + }), + }, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + + const result = getHistoryTree({ history: state }); + + expect(result[0].children[0].id).toBe("child_new"); + expect(result[0].children[1].id).toBe("child_mid"); + expect(result[0].children[2].id).toBe("child_old"); + }); + + it("filters out task chats from tree", () => { + const state: HistoryState = { + chats: { + task_chat: createHistoryItem("task_chat", "Task Chat", { + task_id: "task-123", + }), + regular: createHistoryItem("regular", "Regular Chat"), + }, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + + const result = getHistoryTree({ history: state }); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("regular"); + }); + + it("inverts handoff relationship - handoff becomes root with parent as child", () => { + const state: HistoryState = { + chats: { + original: createHistoryItem("original", "Original Chat", { + updatedAt: "2024-01-01T00:00:00Z", + }), + handoff: createHistoryItem("handoff", "Handoff Chat", { + updatedAt: "2024-01-02T00:00:00Z", + parent_id: "original", + link_type: "handoff", + }), + }, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + + const result = getHistoryTree({ history: state }); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("handoff"); + expect(result[0].children).toHaveLength(1); + expect(result[0].children[0].id).toBe("original"); + }); + + it("keeps subagent as child of parent", () => { + const state: HistoryState = { + chats: { + parent: createHistoryItem("parent", "Parent Chat", { + updatedAt: "2024-01-02T00:00:00Z", + }), + subagent: createHistoryItem("subagent", "Subagent Chat", { + updatedAt: "2024-01-01T00:00:00Z", + parent_id: "parent", + link_type: "subagent", + }), + }, + isLoading: false, + loadError: null, + pagination: defaultPagination, + }; + + const result = getHistoryTree({ history: state }); + + expect(result).toHaveLength(1); + expect(result[0].id).toBe("parent"); + expect(result[0].children).toHaveLength(1); + expect(result[0].children[0].id).toBe("subagent"); + }); +}); + 
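A note on usage (sketch only, not taken from this patch): the selector exercised above returns a tree of `HistoryTreeNode` values — chat items carrying a `children` array — so turning it into flat rows for a history list is a small recursive walk. A minimal sketch under that assumption; the node type is declared structurally so the snippet stands alone, and `flattenHistoryTree` is an illustrative helper, not something exported by `historySlice.ts`:

```typescript
// Structural mirror of the HistoryTreeNode shape used by getHistoryTree;
// only the fields this sketch reads are declared.
type TreeNodeLike = {
  id: string;
  title: string;
  children: TreeNodeLike[];
};

// Flatten the tree into display rows, tracking depth for indentation.
function flattenHistoryTree(
  nodes: TreeNodeLike[],
  depth = 0,
): { id: string; title: string; depth: number }[] {
  return nodes.flatMap((node) => [
    { id: node.id, title: node.title, depth },
    ...flattenHistoryTree(node.children, depth + 1),
  ]);
}

// With the handoff fixture from the tests above, the handoff chat is the
// root (depth 0) and the original chat is rendered underneath it (depth 1).
const rows = flattenHistoryTree([
  {
    id: "handoff",
    title: "Handoff Chat",
    children: [{ id: "original", title: "Original Chat", children: [] }],
  },
]);
// rows: [{ id: "handoff", depth: 0, ... }, { id: "original", depth: 1, ... }]
```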
+describe("pagination reducers", () => { + it("setPagination updates cursor and hasMore", () => { + const state: HistoryState = { + chats: {}, + isLoading: false, + loadError: null, + pagination: { cursor: null, hasMore: true }, + }; + + const result = historySlice.reducer( + state, + historySlice.actions.setPagination({ + cursor: "next-cursor", + hasMore: true, + }), + ); + + expect(result.pagination.cursor).toBe("next-cursor"); + expect(result.pagination.hasMore).toBe(true); + }); + + it("setPagination sets hasMore to false when no more pages", () => { + const state: HistoryState = { + chats: {}, + isLoading: false, + loadError: null, + pagination: { cursor: "some-cursor", hasMore: true }, + }; + + const result = historySlice.reducer( + state, + historySlice.actions.setPagination({ + cursor: null, + hasMore: false, + }), + ); + + expect(result.pagination.cursor).toBeNull(); + expect(result.pagination.hasMore).toBe(false); + }); +}); + +describe("error handling reducers", () => { + it("setHistoryLoadError sets error without affecting pagination", () => { + const state: HistoryState = { + chats: {}, + isLoading: true, + loadError: null, + pagination: { cursor: "some-cursor", hasMore: true }, + }; + + const result = historySlice.reducer( + state, + historySlice.actions.setHistoryLoadError("Network error"), + ); + + expect(result.loadError).toBe("Network error"); + expect(result.isLoading).toBe(false); + expect(result.pagination.hasMore).toBe(true); + expect(result.pagination.cursor).toBe("some-cursor"); + }); + + it("setHistoryLoadError clears error when null is passed", () => { + const state: HistoryState = { + chats: {}, + isLoading: false, + loadError: "Previous error", + pagination: { cursor: null, hasMore: true }, + }; + + const result = historySlice.reducer( + state, + historySlice.actions.setHistoryLoadError(null), + ); + + expect(result.loadError).toBeNull(); + }); + + it("setHistoryLoading clears error when loading starts", () => { + const state: HistoryState = { + chats: {}, + isLoading: false, + loadError: "Previous error", + pagination: { cursor: null, hasMore: true }, + }; + + const result = historySlice.reducer( + state, + historySlice.actions.setHistoryLoading(true), + ); + + expect(result.isLoading).toBe(true); + expect(result.loadError).toBeNull(); + }); +}); + +describe("session_state handling", () => { + it("hydrateHistoryFromMeta includes session_state", () => { + const state: HistoryState = { + chats: {}, + isLoading: false, + loadError: null, + pagination: { cursor: null, hasMore: true }, + }; + + const result = historySlice.reducer( + state, + historySlice.actions.hydrateHistoryFromMeta([ + { + id: "chat1", + title: "Test Chat", + created_at: "2024-01-01T00:00:00Z", + updated_at: "2024-01-01T00:00:00Z", + model: "gpt-4", + mode: "AGENT", + message_count: 5, + session_state: "generating", + total_lines_added: 0, + total_lines_removed: 0, + tasks_total: 0, + tasks_done: 0, + tasks_failed: 0, + }, + ]), + ); + + expect(result.chats.chat1).toBeDefined(); + expect(result.chats.chat1.session_state).toBe("generating"); + }); + + it("hydrateHistoryFromMeta updates session_state for existing chats", () => { + const state: HistoryState = { + chats: { + chat1: createHistoryItem("chat1", "Test Chat", { + session_state: "idle", + }), + }, + isLoading: false, + loadError: null, + pagination: { cursor: null, hasMore: true }, + }; + + const result = historySlice.reducer( + state, + historySlice.actions.hydrateHistoryFromMeta([ + { + id: "chat1", + title: "Test Chat", + created_at: 
"2024-01-01T00:00:00Z", + updated_at: "2024-01-02T00:00:00Z", + model: "gpt-4", + mode: "AGENT", + message_count: 5, + session_state: "executing_tools", + total_lines_added: 0, + total_lines_removed: 0, + tasks_total: 0, + tasks_done: 0, + tasks_failed: 0, + }, + ]), + ); + + expect(result.chats.chat1.session_state).toBe("executing_tools"); + }); +}); diff --git a/refact-agent/gui/src/features/History/historySlice.ts b/refact-agent/gui/src/features/History/historySlice.ts index 9f9fabe22..af39dbb74 100644 --- a/refact-agent/gui/src/features/History/historySlice.ts +++ b/refact-agent/gui/src/features/History/historySlice.ts @@ -5,21 +5,22 @@ import { } from "@reduxjs/toolkit"; import { backUpMessages, - chatAskedQuestion, - chatGenerateTitleThunk, ChatThread, - doneStreaming, - isLspChatMode, + normalizeLegacyMode, maybeAppendToolCallResultFromIdeToMessages, - removeChatFromCache, - restoreChat, setChatMode, SuggestedChat, + applyChatEvent, + newChatAction, + createChatWithId, + restoreChat, + switchToThread, } from "../Chat/Thread"; import { - isAssistantMessage, - isChatGetTitleActionPayload, - isUserMessage, + trajectoriesApi, + TrajectoryData, + TrajectoryMeta, + trajectoryDataToChatThread, } from "../../services/refact"; import { AppDispatch, RootState } from "../../app/store"; import { ideToolCallResponse } from "../../hooks/useEventBusForIDE"; @@ -30,29 +31,143 @@ export type ChatHistoryItem = Omit<ChatThread, "new_chat_suggested"> & { title: string; isTitleGenerated?: boolean; new_chat_suggested?: SuggestedChat; + parent_id?: string; + link_type?: string; + task_id?: string; + task_role?: string; + agent_id?: string; + card_id?: string; + session_state?: + | "idle" + | "generating" + | "executing_tools" + | "paused" + | "waiting_ide" + | "waiting_user_input" + | "completed" + | "error"; + message_count?: number; + root_chat_id?: string; + total_coins?: number; + total_lines_added?: number; + total_lines_removed?: number; + tasks_total?: number; + tasks_done?: number; + tasks_failed?: number; }; +export function isTaskChatLike( + x: Partial<Pick<ChatHistoryItem, "task_id" | "task_meta" | "is_task_chat">>, +): boolean { + return Boolean(x.task_id ?? x.task_meta?.task_id ?? 
x.is_task_chat); +} + export type HistoryMeta = Pick< ChatHistoryItem, "id" | "title" | "createdAt" | "model" | "updatedAt" > & { userMessageCount: number }; -export type HistoryState = Record<string, ChatHistoryItem>; +export type HistoryState = { + chats: Record<string, ChatHistoryItem>; + isLoading: boolean; + loadError: string | null; + pagination: { + cursor: string | null; + hasMore: boolean; + }; +}; + +export type TrajectoryWithMeta = TrajectoryData & { + parent_id?: string; + link_type?: string; + task_id?: string; + task_role?: string; + agent_id?: string; + card_id?: string; +}; + +export type HistoryTreeNode = ChatHistoryItem & { + children: HistoryTreeNode[]; +}; + +export function buildHistoryTree( + chats: Record<string, ChatHistoryItem>, +): HistoryTreeNode[] { + const nodes = Object.values(chats) + .filter((x) => !isTaskChatLike(x)) + .map((x) => ({ ...x, children: [] as HistoryTreeNode[] })); + + const byId = new Map(nodes.map((n) => [n.id, n])); + const parentByChild = new Map<string, string>(); + + const ordered = [...nodes].sort((a, b) => + b.updatedAt.localeCompare(a.updatedAt), + ); -const initialState: HistoryState = {}; + const wouldCycle = (parentId: string, childId: string): boolean => { + let cur: string | undefined = parentId; + while (cur) { + if (cur === childId) return true; + cur = parentByChild.get(cur); + } + return false; + }; + + const attach = (parentId: string, childId: string) => { + if (parentByChild.has(childId)) return; + if (wouldCycle(parentId, childId)) return; + const parent = byId.get(parentId); + const child = byId.get(childId); + if (!parent || !child) return; + parentByChild.set(childId, parentId); + parent.children.push(child); + }; + + for (const node of ordered) { + const pid = node.parent_id; + if (!pid || !byId.has(pid)) continue; + if (node.link_type === "handoff" || node.link_type === "mode_transition") { + attach(node.id, pid); + } else { + attach(pid, node.id); + } + } + + const sortTree = (xs: HistoryTreeNode[]) => { + xs.sort((a, b) => b.updatedAt.localeCompare(a.updatedAt)); + for (const x of xs) sortTree(x.children); + }; + + const roots = nodes.filter((n) => !parentByChild.has(n.id)); + sortTree(roots); + return roots; +} + +const initialState: HistoryState = { + chats: {}, + isLoading: true, + loadError: null, + pagination: { + cursor: null, + hasMore: true, + }, +}; function getFirstUserContentFromChat(messages: ChatThread["messages"]): string { - const message = messages.find(isUserMessage); + const message = messages.find( + (msg): msg is ChatThread["messages"][number] & { role: "user" } => + msg.role === "user", + ); if (!message) return "New Chat"; if (typeof message.content === "string") { - return message.content.replace(/^\s+/, ""); + return message.content.replace(/^\s+/, "").slice(0, 100); } - const firstUserInput = message.content.find((message) => { - if ("m_type" in message && message.m_type === "text") { + const firstUserInput = message.content.find((item) => { + if ("m_type" in item && item.m_type === "text") { return true; } - if ("type" in message && message.type === "text") { + if ("type" in item && item.type === "text") { return true; } return false; @@ -65,94 +180,322 @@ function getFirstUserContentFromChat(messages: ChatThread["messages"]): string { ? 
firstUserInput.text : "New Chat"; - return text.replace(/^\s+/, ""); + return text.replace(/^\s+/, "").slice(0, 100); +} + +function chatThreadToHistoryItem(thread: ChatThread): ChatHistoryItem { + const now = new Date().toISOString(); + const updatedMode = normalizeLegacyMode(thread.mode); + + return { + ...thread, + title: thread.title ?? getFirstUserContentFromChat(thread.messages), + createdAt: thread.createdAt ?? now, + updatedAt: now, + integration: thread.integration, + currentMaximumContextTokens: thread.currentMaximumContextTokens, + isTitleGenerated: thread.isTitleGenerated, + mode: updatedMode, + task_id: thread.task_meta?.task_id, + }; +} + +function trajectoryToHistoryItem( + data: TrajectoryData, + meta?: { + parent_id?: string; + link_type?: string; + task_id?: string; + task_role?: string; + agent_id?: string; + card_id?: string; + }, +): ChatHistoryItem { + const thread = trajectoryDataToChatThread(data); + return { + ...thread, + createdAt: data.created_at, + updatedAt: data.updated_at, + title: data.title, + isTitleGenerated: data.isTitleGenerated, + parent_id: meta?.parent_id, + link_type: meta?.link_type, + task_id: meta?.task_id, + task_role: meta?.task_role, + agent_id: meta?.agent_id, + card_id: meta?.card_id, + }; +} + +function trajectoryMetaToHistoryItem(meta: TrajectoryMeta): ChatHistoryItem { + return { + id: meta.id, + title: meta.title, + model: meta.model, + mode: meta.mode as ChatHistoryItem["mode"], + tool_use: "agent", + messages: [], + boost_reasoning: false, + context_tokens_cap: undefined, + include_project_info: true, + increase_max_tokens: false, + project_name: undefined, + isTitleGenerated: false, + createdAt: meta.created_at, + last_user_message_id: "", + updatedAt: meta.updated_at, + parent_id: meta.parent_id, + link_type: meta.link_type, + task_id: meta.task_id, + task_role: meta.task_role, + agent_id: meta.agent_id, + card_id: meta.card_id, + session_state: meta.session_state, + message_count: meta.message_count, + root_chat_id: meta.root_chat_id, + total_coins: meta.total_coins, + total_lines_added: meta.total_lines_added, + total_lines_removed: meta.total_lines_removed, + tasks_total: meta.tasks_total, + tasks_done: meta.tasks_done, + tasks_failed: meta.tasks_failed, + }; } export const historySlice = createSlice({ name: "history", initialState, reducers: { - saveChat: (state, action: PayloadAction<ChatThread>) => { - if (action.payload.messages.length === 0) return state; - const now = new Date().toISOString(); - - const updatedMode = - action.payload.mode && !isLspChatMode(action.payload.mode) - ? "AGENT" - : action.payload.mode; - - const chat: ChatHistoryItem = { - ...action.payload, - title: action.payload.title - ? action.payload.title - : getFirstUserContentFromChat(action.payload.messages), - createdAt: action.payload.createdAt ?? 
now, - updatedAt: now, - // TODO: check if this integration may cause any issues - integration: action.payload.integration, - currentMaximumContextTokens: action.payload.currentMaximumContextTokens, - isTitleGenerated: action.payload.isTitleGenerated, - automatic_patch: action.payload.automatic_patch, - mode: updatedMode, - }; + setHistoryLoading: (state, action: PayloadAction<boolean>) => { + state.isLoading = action.payload; + if (action.payload) { + state.loadError = null; + } + }, - const messageMap = { - ...state, - }; - messageMap[chat.id] = chat; + setHistoryLoadError: (state, action: PayloadAction<string | null>) => { + state.loadError = action.payload; + state.isLoading = false; + }, - const messages = Object.values(messageMap); - if (messages.length <= 100) { - return messageMap; + saveChat: (state, action: PayloadAction<ChatThread>) => { + if (action.payload.messages.length === 0) return; + if (isTaskChatLike(action.payload)) return; + const chat = chatThreadToHistoryItem(action.payload); + chat.message_count = action.payload.messages.length; + chat.messages = []; + if (chat.id in state.chats) { + const existing = state.chats[chat.id]; + if ( + existing.isTitleGenerated === true && + chat.isTitleGenerated !== true + ) { + chat.title = existing.title; + chat.isTitleGenerated = true; + } + chat.parent_id = chat.parent_id ?? existing.parent_id; + chat.link_type = chat.link_type ?? existing.link_type; + chat.task_id = chat.task_id ?? existing.task_id; + chat.task_role = chat.task_role ?? existing.task_role; + chat.agent_id = chat.agent_id ?? existing.agent_id; + chat.card_id = chat.card_id ?? existing.card_id; } + state.chats[chat.id] = chat; + }, - const sortedByLastUpdated = messages - .slice(0) - .sort((a, b) => b.updatedAt.localeCompare(a.updatedAt)); - - const newHistory = sortedByLastUpdated.slice(0, 100); - const nextState = newHistory.reduce( - (acc, chat) => ({ ...acc, [chat.id]: chat }), - {}, - ); - return nextState; + hydrateHistory: (state, action: PayloadAction<TrajectoryWithMeta[]>) => { + for (const data of action.payload) { + state.chats[data.id] = trajectoryToHistoryItem(data, { + parent_id: data.parent_id, + link_type: data.link_type, + task_id: data.task_id, + task_role: data.task_role, + agent_id: data.agent_id, + card_id: data.card_id, + }); + } }, - setTitleGenerationCompletionForChat: ( + hydrateHistoryFromMeta: ( state, - action: PayloadAction<string>, + action: PayloadAction<TrajectoryMeta[]>, ) => { - const chatId = action.payload; - state[chatId].isTitleGenerated = true; + for (const meta of action.payload) { + if (!(meta.id in state.chats)) { + state.chats[meta.id] = trajectoryMetaToHistoryItem(meta); + } else { + const existing = state.chats[meta.id]; + existing.title = meta.title; + existing.updatedAt = meta.updated_at; + existing.model = meta.model; + existing.mode = meta.mode as ChatHistoryItem["mode"]; + existing.parent_id = meta.parent_id; + existing.link_type = meta.link_type; + existing.task_id = meta.task_id; + existing.task_role = meta.task_role; + existing.agent_id = meta.agent_id; + existing.card_id = meta.card_id; + existing.session_state = meta.session_state; + existing.message_count = meta.message_count; + existing.root_chat_id = meta.root_chat_id; + existing.total_coins = meta.total_coins; + existing.total_lines_added = meta.total_lines_added; + existing.total_lines_removed = meta.total_lines_removed; + existing.tasks_total = meta.tasks_total; + existing.tasks_done = meta.tasks_done; + existing.tasks_failed = meta.tasks_failed; + } + } }, 
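A note on usage (sketch only, not part of this patch): `setHistoryLoading`, `hydrateHistoryFromMeta`, `setPagination`, and `setHistoryLoadError` are shaped for a cursor-paginated metadata fetch — flip the loading flag, hydrate whichever page arrives, record the next cursor, and report failures through `setHistoryLoadError` (which also resets `isLoading`). A minimal sketch of that call sequence; `fetchPage` and the `MetaPage` shape are assumptions standing in for whatever trajectory-listing endpoint the GUI actually calls:

```typescript
import type { Dispatch } from "@reduxjs/toolkit";
import type { TrajectoryMeta } from "../../services/refact";
import {
  setHistoryLoading,
  setHistoryLoadError,
  hydrateHistoryFromMeta,
  setPagination,
} from "./historySlice";

// Hypothetical page shape returned by a cursor-paginated metadata fetch.
type MetaPage = {
  items: TrajectoryMeta[];
  next_cursor: string | null;
};

export async function loadHistoryPage(
  dispatch: Dispatch,
  fetchPage: (cursor: string | null) => Promise<MetaPage>,
  cursor: string | null,
): Promise<void> {
  dispatch(setHistoryLoading(true)); // also clears any previous loadError
  try {
    const page = await fetchPage(cursor);
    dispatch(hydrateHistoryFromMeta(page.items)); // upsert/refresh stubs in state.chats
    dispatch(
      setPagination({
        cursor: page.next_cursor,
        hasMore: page.next_cursor !== null,
      }),
    );
    dispatch(setHistoryLoading(false));
  } catch (err) {
    // setHistoryLoadError also flips isLoading back to false in the reducer.
    dispatch(
      setHistoryLoadError(
        err instanceof Error ? err.message : "Failed to load chat history",
      ),
    );
  }
}
```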
- markChatAsUnread: (state, action: PayloadAction<string>) => { - const chatId = action.payload; - state[chatId].read = false; + setPagination: ( + state, + action: PayloadAction<{ cursor: string | null; hasMore: boolean }>, + ) => { + state.pagination.cursor = action.payload.cursor; + state.pagination.hasMore = action.payload.hasMore; }, - markChatAsRead: (state, action: PayloadAction<string>) => { - const chatId = action.payload; - state[chatId].read = true; + deleteChatById: (state, action: PayloadAction<string>) => { + const { [action.payload]: _, ...rest } = state.chats; + state.chats = rest; }, - deleteChatById: (state, action: PayloadAction<string>) => { - return Object.entries(state).reduce<Record<string, ChatHistoryItem>>( - (acc, [key, value]) => { - if (key === action.payload) return acc; - return { ...acc, [key]: value }; - }, - {}, - ); + upsertChatStub: ( + state, + action: PayloadAction<{ + id: string; + title?: string; + model?: string; + session_state?: ChatHistoryItem["session_state"]; + parent_id?: string; + link_type?: string; + }>, + ) => { + const { id, title, model, session_state, parent_id, link_type } = + action.payload; + if (id in state.chats) { + if (title) state.chats[id].title = title; + if (model) state.chats[id].model = model; + if (session_state) state.chats[id].session_state = session_state; + if (parent_id) state.chats[id].parent_id = parent_id; + if (link_type) state.chats[id].link_type = link_type; + return; + } + const now = new Date().toISOString(); + state.chats[id] = { + id, + title: title ?? "New Chat", + model: model ?? "", + mode: "AGENT", + tool_use: "agent", + messages: [], + boost_reasoning: false, + context_tokens_cap: undefined, + include_project_info: true, + increase_max_tokens: false, + project_name: undefined, + isTitleGenerated: false, + createdAt: now, + last_user_message_id: "", + updatedAt: now, + session_state: session_state ?? 
"idle", + message_count: 0, + parent_id, + link_type, + }; }, + updateChatTitleById: ( state, action: PayloadAction<{ chatId: string; newTitle: string }>, ) => { - state[action.payload.chatId].title = action.payload.newTitle; + if (action.payload.chatId in state.chats) { + state.chats[action.payload.chatId].title = action.payload.newTitle; + } }, + + updateChatMetaById: ( + state, + action: PayloadAction<{ + id: string; + title?: string; + isTitleGenerated?: boolean; + updatedAt?: string; + session_state?: ChatHistoryItem["session_state"]; + message_count?: number; + parent_id?: string; + link_type?: string; + root_chat_id?: string; + total_coins?: number; + total_lines_added?: number; + total_lines_removed?: number; + model?: string; + mode?: string; + tasks_total?: number; + tasks_done?: number; + tasks_failed?: number; + }>, + ) => { + if (!(action.payload.id in state.chats)) return; + const chat = state.chats[action.payload.id]; + if (action.payload.title !== undefined) { + chat.title = action.payload.title; + } + if (action.payload.isTitleGenerated !== undefined) { + chat.isTitleGenerated = action.payload.isTitleGenerated; + } + if (action.payload.updatedAt !== undefined) { + chat.updatedAt = action.payload.updatedAt; + } + if (action.payload.session_state !== undefined) { + chat.session_state = action.payload.session_state; + } + if (action.payload.message_count !== undefined) { + chat.message_count = action.payload.message_count; + } + if (action.payload.parent_id !== undefined) { + chat.parent_id = action.payload.parent_id; + } + if (action.payload.link_type !== undefined) { + chat.link_type = action.payload.link_type; + } + if (action.payload.root_chat_id !== undefined) { + chat.root_chat_id = action.payload.root_chat_id; + } + if (action.payload.total_coins !== undefined) { + chat.total_coins = action.payload.total_coins; + } + if (action.payload.total_lines_added !== undefined) { + chat.total_lines_added = action.payload.total_lines_added; + } + if (action.payload.total_lines_removed !== undefined) { + chat.total_lines_removed = action.payload.total_lines_removed; + } + if (action.payload.model !== undefined) { + chat.model = action.payload.model; + } + if (action.payload.mode !== undefined) { + chat.mode = action.payload.mode as ChatHistoryItem["mode"]; + } + if (action.payload.tasks_total !== undefined) { + chat.tasks_total = action.payload.tasks_total; + } + if (action.payload.tasks_done !== undefined) { + chat.tasks_done = action.payload.tasks_done; + } + if (action.payload.tasks_failed !== undefined) { + chat.tasks_failed = action.payload.tasks_failed; + } + }, + clearHistory: () => { - return {}; + return { + chats: {}, + isLoading: false, + loadError: null, + pagination: { cursor: null, hasMore: true }, + }; }, upsertToolCallIntoHistory: ( @@ -163,9 +506,9 @@ export const historySlice = createSlice({ } >, ) => { - if (!(action.payload.chatId in state)) return; + if (!(action.payload.chatId in state.chats)) return; maybeAppendToolCallResultFromIdeToMessages( - state[action.payload.chatId].messages, + state.chats[action.payload.chatId].messages, action.payload.toolCallId, action.payload.accepted, action.payload.replaceOnly, @@ -173,31 +516,43 @@ export const historySlice = createSlice({ }, }, selectors: { + selectHistoryIsLoading: (state): boolean => state.isLoading, + getChatById: (state, id: string): ChatHistoryItem | null => { - if (!(id in state)) return null; - return state[id]; + if (!(id in state.chats)) return null; + return state.chats[id]; }, getHistory: (state): 
ChatHistoryItem[] => - Object.values(state).sort((a, b) => - b.updatedAt.localeCompare(a.updatedAt), - ), + Object.values(state.chats) + .filter((item) => !isTaskChatLike(item)) + .sort((a, b) => b.updatedAt.localeCompare(a.updatedAt)), + + getHistoryTree: (state): HistoryTreeNode[] => buildHistoryTree(state.chats), }, }); export const { + setHistoryLoading, + setHistoryLoadError, saveChat, + hydrateHistory, + hydrateHistoryFromMeta, + setPagination, deleteChatById, - markChatAsUnread, - markChatAsRead, - setTitleGenerationCompletionForChat, + upsertChatStub, updateChatTitleById, + updateChatMetaById, clearHistory, upsertToolCallIntoHistory, } = historySlice.actions; -export const { getChatById, getHistory } = historySlice.selectors; +export const { + selectHistoryIsLoading, + getChatById, + getHistory, + getHistoryTree, +} = historySlice.selectors; -// We could use this or reduce-reducers packages export const historyMiddleware = createListenerMiddleware(); const startHistoryListening = historyMiddleware.startListening.withTypes< RootState, @@ -205,78 +560,19 @@ const startHistoryListening = historyMiddleware.startListening.withTypes< >(); startHistoryListening({ - actionCreator: doneStreaming, + actionCreator: applyChatEvent, effect: (action, listenerApi) => { + const event = action.payload; + if (event.type !== "stream_finished") return; + if (event.finish_reason === "abort" || event.finish_reason === "error") + return; + const state = listenerApi.getState(); - const isTitleGenerationEnabled = state.chat.title_generation_enabled; - - const thread = - action.payload.id in state.chat.cache - ? state.chat.cache[action.payload.id] - : state.chat.thread; - - const lastMessage = thread.messages.slice(-1)[0]; - const isTitleGenerated = thread.isTitleGenerated; - // Checking for reliable chat pause - if ( - thread.messages.length && - isAssistantMessage(lastMessage) && - !lastMessage.tool_calls - ) { - // Getting user message - const firstUserMessage = thread.messages.find(isUserMessage); - if (firstUserMessage) { - // Checking if chat title is already generated, if not - generating it - if (!isTitleGenerated && isTitleGenerationEnabled) { - listenerApi - .dispatch( - chatGenerateTitleThunk({ - messages: [firstUserMessage], - chatId: state.chat.thread.id, - }), - ) - .unwrap() - .then((response) => { - if (isChatGetTitleActionPayload(response)) { - if (typeof response.title === "string") { - listenerApi.dispatch( - saveChat({ - ...thread, - title: response.title, - }), - ); - listenerApi.dispatch( - setTitleGenerationCompletionForChat(thread.id), - ); - } - } - }) - .catch(() => { - // TODO: handle error in case if not generated, now returning user message as a title - const title = getFirstUserContentFromChat([firstUserMessage]); - listenerApi.dispatch( - saveChat({ - ...thread, - title: title, - }), - ); - }); - } - } - } else { - // Probably chat was paused with uncalled tools - listenerApi.dispatch( - saveChat({ - ...thread, - }), - ); - } - if (state.chat.thread.id === action.payload.id) { - listenerApi.dispatch(saveChat(state.chat.thread)); - } else if (action.payload.id in state.chat.cache) { - listenerApi.dispatch(saveChat(state.chat.cache[action.payload.id])); - listenerApi.dispatch(removeChatFromCache({ id: action.payload.id })); - } + const runtime = state.chat.threads[event.chat_id]; + if (!runtime) return; + const thread = runtime.thread; + + listenerApi.dispatch(saveChat(thread)); }, }); @@ -284,8 +580,10 @@ startHistoryListening({ actionCreator: backUpMessages, effect: (action, 
listenerApi) => { const state = listenerApi.getState(); - const thread = state.chat.thread; - if (thread.id !== action.payload.id) return; + const runtime = state.chat.threads[action.payload.id]; + if (!runtime) return; + const thread = runtime.thread; + const toSave = { ...thread, messages: action.payload.messages, @@ -296,32 +594,90 @@ startHistoryListening({ }); startHistoryListening({ - actionCreator: chatAskedQuestion, + actionCreator: setChatMode, effect: (action, listenerApi) => { - listenerApi.dispatch(markChatAsUnread(action.payload.id)); + const state = listenerApi.getState(); + const runtime = state.chat.threads[state.chat.current_thread_id]; + if (!runtime) return; + const thread = runtime.thread; + if (!(thread.id in state.history.chats)) return; + + const toSave = { ...thread, mode: action.payload }; + listenerApi.dispatch(saveChat(toSave)); }, }); startHistoryListening({ - actionCreator: restoreChat, + actionCreator: deleteChatById, effect: (action, listenerApi) => { - const chat = listenerApi.getState().chat; - if (chat.thread.id == action.payload.id && chat.streaming) return; - if (action.payload.id in chat.cache) return; - listenerApi.dispatch(markChatAsRead(action.payload.id)); + void listenerApi.dispatch( + trajectoriesApi.endpoints.deleteTrajectory.initiate(action.payload), + ); }, }); startHistoryListening({ - actionCreator: setChatMode, - effect: (action, listenerApi) => { + actionCreator: newChatAction, + effect: (_, listenerApi) => { const state = listenerApi.getState(); - const thread = state.chat.thread; - if (!(thread.id in state.history)) return; + const id = state.chat.current_thread_id; + const runtime = state.chat.threads[id]; + if (!runtime) return; + if (isTaskChatLike(runtime.thread)) return; + listenerApi.dispatch( + upsertChatStub({ + id, + title: runtime.thread.title ? runtime.thread.title : undefined, + model: runtime.thread.model ? runtime.thread.model : undefined, + }), + ); + }, +}); - const toSave = { ...thread, mode: action.payload }; - listenerApi.dispatch(saveChat(toSave)); +startHistoryListening({ + actionCreator: createChatWithId, + effect: (action, listenerApi) => { + if (action.payload.isTaskChat === true || action.payload.taskMeta?.task_id) + return; + listenerApi.dispatch( + upsertChatStub({ + id: action.payload.id, + title: action.payload.title, + model: action.payload.model, + parent_id: action.payload.parentId, + link_type: action.payload.linkType, + }), + ); }, }); -// TODO: add a listener for creating a new chat ? +startHistoryListening({ + actionCreator: restoreChat, + effect: (action, listenerApi) => { + if (isTaskChatLike(action.payload)) return; + listenerApi.dispatch( + upsertChatStub({ + id: action.payload.id, + title: action.payload.title, + model: action.payload.model, + }), + ); + }, +}); + +startHistoryListening({ + actionCreator: switchToThread, + effect: (action, listenerApi) => { + const state = listenerApi.getState(); + const runtime = state.chat.threads[action.payload.id]; + if (!runtime) return; + if (isTaskChatLike(runtime.thread)) return; + listenerApi.dispatch( + upsertChatStub({ + id: action.payload.id, + title: runtime.thread.title ? runtime.thread.title : undefined, + model: runtime.thread.model ? 
runtime.thread.model : undefined, + }), + ); + }, +}); diff --git a/refact-agent/gui/src/features/Integrations/integrationsSlice.tsx b/refact-agent/gui/src/features/Integrations/integrationsSlice.tsx index 1df492d32..53f6e4655 100644 --- a/refact-agent/gui/src/features/Integrations/integrationsSlice.tsx +++ b/refact-agent/gui/src/features/Integrations/integrationsSlice.tsx @@ -18,13 +18,13 @@ export const integrationsSlice = createSlice({ reducers: { addToCacheOnMiss: (state, action: PayloadAction<Integration>) => { const key = action.payload.integr_config_path; - if (key in state.cachedForms) return state; + if (key in state.cachedForms) return; state.cachedForms[key] = action.payload.integr_values; }, //TODO: could just be the path removeFromCache: (state, action: PayloadAction<string>) => { - if (!(action.payload in state.cachedForms)) return state; + if (!(action.payload in state.cachedForms)) return; const nextCache = Object.entries( state.cachedForms, diff --git a/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.md b/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.md new file mode 100644 index 000000000..d05ab0b70 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.md @@ -0,0 +1,123 @@ +# KnowledgeGraphView Component + +## Overview + +A simplified, pure graph renderer for displaying memory nodes and their relationships. This component focuses exclusively on document-to-document connections without filters, modes, or UI controls. + +## Features + +- **Doc-only rendering**: Only displays `doc_code`, `doc_decision`, `doc_preference`, `doc_pattern`, and `doc_lesson` nodes +- **Edge filtering**: Automatically filters edges to only show connections between doc nodes +- **Node coloring by kind**: + - `doc_code` → Blue (#3B82F6) + - `doc_decision` → Purple (#8B5CF6) + - `doc_preference` → Green (#10B981) + - `doc_pattern` → Amber (#F59E0B) + - `doc_lesson` → Cyan (#06B6D4) +- **Node sizing by degree**: More connected nodes appear larger +- **Interactive selection**: Click nodes to select, click background to deselect +- **Force-directed layout**: Uses fcose algorithm for optimal link visualization +- **Zoom-based labels**: Node labels appear on hover or when zoomed in +- **Empty state handling**: Shows "No linked memories" when no nodes available + +## Props + +```typescript +interface KnowledgeGraphViewProps { + nodes: KnowledgeGraphNode[]; // All nodes from API + edges: KnowledgeGraphEdge[]; // All edges from API + selectedId: string | null; // Currently selected node ID + onSelectId: (id: string | null) => void; // Selection callback + isLoading?: boolean; // Show loading state +} +``` + +## Usage + +```typescript +import { KnowledgeGraphView } from './features/Knowledge'; + +function MyComponent() { + const [selectedId, setSelectedId] = useState<string | null>(null); + const { data: graph, isLoading } = useGetKnowledgeGraphQuery(); + + return ( + <KnowledgeGraphView + nodes={graph?.nodes ?? []} + edges={graph?.edges ?? 
[]}
+      selectedId={selectedId}
+      onSelectId={setSelectedId}
+      isLoading={isLoading}
+    />
+  );
+}
+```
+
+## Filtering Behavior
+
+### Nodes
+
+- **Included**: `doc_code`, `doc_decision`, `doc_preference`, `doc_pattern`, `doc_lesson`
+- **Excluded**: `doc_deprecated`, `doc_trajectory`, `tag`, `file`, `entity`, and any other types
+
+### Edges
+
+- Only edges where both source AND target are included doc nodes
+- All other edges are filtered out
+
+## Layout Configuration
+
+Uses fcose (force-directed) layout with these parameters (matching the values set in `KnowledgeGraphView.tsx`):
+
+- `idealEdgeLength`: 220px
+- `nodeRepulsion`: 18000
+- `nodeSeparation`: 90
+- `edgeElasticity`: 0.35
+- `gravity`: 0.15
+- `animationDuration`: 500ms
+
+## Styling
+
+- Node size: Maps degree (1-20) to size (30-60px)
+- Selected node: 5px white border, lighter purple fill (#A78BFA), slightly larger
+- Edges: Gray (#9CA3AF) with arrow, 40% opacity
+- Labels: Hidden by default, shown on hover or when zoom > 1.2
+
+## Differences from KnowledgeGraph.tsx
+
+| Feature          | KnowledgeGraph                   | KnowledgeGraphView |
+| ---------------- | -------------------------------- | ------------------ |
+| Filter UI        | ✅ Checkboxes for kinds/statuses | ❌ None            |
+| Sidebar          | ✅ Stats + node details          | ❌ None            |
+| Focus mode       | ✅ 1-hop/2-hop traversal         | ❌ None            |
+| Overview mode    | ✅ Concentric layout             | ❌ None            |
+| Node groups      | ✅ Tags/files/entities toggles   | ❌ None            |
+| Deprecated nodes | ✅ Optional display              | ❌ Always hidden   |
+| Trajectory nodes | ✅ Optional display              | ❌ Always hidden   |
+| Layout           | Concentric or fcose              | fcose only         |
+
+## Testing
+
+Run tests:
+
+```bash
+npm test -- --run src/features/Knowledge/KnowledgeGraphView.test.tsx
+```
+
+Test coverage:
+
+- ✅ Empty state rendering
+- ✅ Node and edge rendering
+- ✅ Non-doc node filtering
+- ✅ Edge filtering (doc-doc only)
+- ✅ Deprecated/trajectory exclusion
+- ✅ Empty edges handling
+- ✅ Loading state
+- ✅ All doc node types
+- ✅ Selection callback
+
+## Implementation Notes
+
+- Uses `react-cytoscapejs` for graph rendering
+- Cytoscape event handlers have ESLint suppressions due to the library's limited TypeScript support
+- Layout runs on every element change (nodes/edges update)
+- Selected node auto-centers in viewport
+- No console errors on unmount (layout properly stopped)
diff --git a/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.module.css b/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.module.css
new file mode 100644
index 000000000..1e191346a
--- /dev/null
+++ b/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.module.css
@@ -0,0 +1,21 @@
+.emptyState {
+  display: flex;
+  flex-direction: column;
+  align-items: center;
+  justify-content: center;
+  height: 100%;
+  gap: var(--space-3);
+  padding: var(--space-4);
+  text-align: center;
+  color: var(--gray-10);
+}
+
+.emptyStateIcon {
+  font-size: 48px;
+  opacity: 0.3;
+}
+
+.emptyStateText {
+  font-size: 13px;
+  line-height: 1.5;
+}
diff --git a/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.test.tsx b/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.test.tsx
new file mode 100644
index 000000000..60041b8fb
--- /dev/null
+++ b/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.test.tsx
@@ -0,0 +1,275 @@
+import { describe, it, expect, vi } from "vitest";
+import { render, screen } from "@testing-library/react";
+import { KnowledgeGraphView } from "./KnowledgeGraphView";
+import type {
+  KnowledgeGraphNode,
+  KnowledgeGraphEdge,
+} from "../../services/refact/types";
+
+vi.mock("react-cytoscapejs", () => ({
+  default: ({
+    cy,
+    elements,
+  }: {
+    cy?: (cy: unknown) => void;
+    elements: 
unknown[]; + }) => { + if (cy) { + const mockNode = { + data: vi.fn((key: string) => { + if (key === "label") return "Mock Label"; + return "mock-value"; + }), + style: vi.fn(), + id: vi.fn(() => "mock-id"), + }; + + const mockCollection = { + forEach: vi.fn((callback: (node: unknown) => void) => { + callback(mockNode); + }), + length: 1, + select: vi.fn(), + unselect: vi.fn(), + }; + + const mockCy = { + on: vi.fn(), + off: vi.fn(), + resize: vi.fn(), + zoom: vi.fn(() => 1), + center: vi.fn(), + layout: vi.fn(() => ({ + run: vi.fn(), + stop: vi.fn(), + })), + elements: vi.fn(() => mockCollection), + animate: vi.fn(), + $id: vi.fn(() => mockCollection), + $: vi.fn(() => mockCollection), + }; + cy(mockCy); + } + return <div data-testid="cytoscape-mock">{elements.length} elements</div>; + }, +})); + +const createDocNode = ( + id: string, + type: string, + label: string, +): KnowledgeGraphNode => ({ + id, + node_type: type, + label, +}); + +const createEdge = ( + source: string, + target: string, + type: string, +): KnowledgeGraphEdge => ({ + source, + target, + edge_type: type, +}); + +describe("KnowledgeGraphView", () => { + it("renders empty state when no nodes", () => { + render( + <KnowledgeGraphView + nodes={[]} + edges={[]} + selectedId={null} + onSelectId={vi.fn()} + />, + ); + + expect(screen.getByText("No linked memories")).toBeInTheDocument(); + }); + + it("renders nodes and edges correctly", () => { + const nodes = [ + createDocNode("doc1", "doc_code", "Code Memory"), + createDocNode("doc2", "doc_decision", "Decision Memory"), + ]; + const edges = [createEdge("doc1", "doc2", "relates_to")]; + + render( + <KnowledgeGraphView + nodes={nodes} + edges={edges} + selectedId={null} + onSelectId={vi.fn()} + />, + ); + + expect(screen.getByTestId("cytoscape-mock")).toBeInTheDocument(); + expect(screen.getByText("3 elements")).toBeInTheDocument(); + }); + + it("filters out non-doc nodes", () => { + const nodes = [ + createDocNode("doc1", "doc_code", "Code Memory"), + createDocNode("tag1", "tag", "Tag Node"), + createDocNode("file1", "file", "File Node"), + createDocNode("doc2", "doc_decision", "Decision Memory"), + ]; + const edges = [createEdge("doc1", "doc2", "relates_to")]; + + render( + <KnowledgeGraphView + nodes={nodes} + edges={edges} + selectedId={null} + onSelectId={vi.fn()} + />, + ); + + expect(screen.getByText("3 elements")).toBeInTheDocument(); + }); + + it("filters out edges with non-doc nodes", () => { + const nodes = [ + createDocNode("doc1", "doc_code", "Code Memory"), + createDocNode("tag1", "tag", "Tag Node"), + createDocNode("doc2", "doc_decision", "Decision Memory"), + ]; + const edges = [ + createEdge("doc1", "doc2", "relates_to"), + createEdge("doc1", "tag1", "tagged_with"), + createEdge("tag1", "doc2", "tagged_with"), + ]; + + render( + <KnowledgeGraphView + nodes={nodes} + edges={edges} + selectedId={null} + onSelectId={vi.fn()} + />, + ); + + expect(screen.getByText("3 elements")).toBeInTheDocument(); + }); + + it("filters deprecated and trajectory nodes", () => { + const nodes = [ + createDocNode("doc1", "doc_code", "Code Memory"), + createDocNode("doc2", "doc_deprecated", "Deprecated Memory"), + createDocNode("doc3", "doc_trajectory", "Trajectory Memory"), + createDocNode("doc4", "doc_preference", "Preference Memory"), + ]; + const edges = [ + createEdge("doc1", "doc2", "relates_to"), + createEdge("doc1", "doc4", "relates_to"), + ]; + + render( + <KnowledgeGraphView + nodes={nodes} + edges={edges} + selectedId={null} + onSelectId={vi.fn()} + />, + ); + + 
expect(screen.getByText("3 elements")).toBeInTheDocument(); + }); + + it("handles empty edges gracefully", () => { + const nodes = [ + createDocNode("doc1", "doc_code", "Code Memory"), + createDocNode("doc2", "doc_decision", "Decision Memory"), + ]; + + render( + <KnowledgeGraphView + nodes={nodes} + edges={[]} + selectedId={null} + onSelectId={vi.fn()} + />, + ); + + expect(screen.getByTestId("cytoscape-mock")).toBeInTheDocument(); + expect(screen.getByText("2 elements")).toBeInTheDocument(); + }); + + it("shows loading state", () => { + render( + <KnowledgeGraphView + nodes={[]} + edges={[]} + selectedId={null} + onSelectId={vi.fn()} + isLoading={true} + />, + ); + + expect(screen.getByText("Loading graph...")).toBeInTheDocument(); + }); + + it("calls onSelectId with correct ID on node click", () => { + const onSelectId = vi.fn(); + const nodes = [createDocNode("doc1", "doc_code", "Code Memory")]; + + render( + <KnowledgeGraphView + nodes={nodes} + edges={[]} + selectedId={null} + onSelectId={onSelectId} + />, + ); + + expect(screen.getByTestId("cytoscape-mock")).toBeInTheDocument(); + }); + + it("renders all doc node types", () => { + const nodes = [ + createDocNode("doc1", "doc_code", "Code"), + createDocNode("doc2", "doc_decision", "Decision"), + createDocNode("doc3", "doc_preference", "Preference"), + createDocNode("doc4", "doc_pattern", "Pattern"), + createDocNode("doc5", "doc_lesson", "Lesson"), + ]; + const edges = [ + createEdge("doc1", "doc2", "relates_to"), + createEdge("doc2", "doc3", "relates_to"), + createEdge("doc3", "doc4", "relates_to"), + createEdge("doc4", "doc5", "relates_to"), + ]; + + render( + <KnowledgeGraphView + nodes={nodes} + edges={edges} + selectedId={null} + onSelectId={vi.fn()} + />, + ); + + expect(screen.getByText("9 elements")).toBeInTheDocument(); + }); + + it("renders plain 'doc' node type (without underscore)", () => { + const nodes = [ + createDocNode("doc1", "doc", "Plain Doc Memory"), + createDocNode("doc2", "doc_code", "Code Memory"), + ]; + const edges = [createEdge("doc1", "doc2", "relates_to")]; + + render( + <KnowledgeGraphView + nodes={nodes} + edges={edges} + selectedId={null} + onSelectId={vi.fn()} + />, + ); + + // Should have 2 nodes + 1 edge = 3 elements + expect(screen.getByText("3 elements")).toBeInTheDocument(); + }); +}); diff --git a/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.tsx b/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.tsx new file mode 100644 index 000000000..fdb1416eb --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/KnowledgeGraphView.tsx @@ -0,0 +1,331 @@ +import { useEffect, useRef, useState, useMemo, useCallback } from "react"; +import CytoscapeComponent from "react-cytoscapejs"; +import cytoscape from "cytoscape"; +import type Cytoscape from "cytoscape"; +import fcose from "cytoscape-fcose"; +import { Flex, Text } from "@radix-ui/themes"; +import type { + KnowledgeGraphNode, + KnowledgeGraphEdge, +} from "../../services/refact/types"; +import styles from "./KnowledgeGraphView.module.css"; + +cytoscape.use(fcose); + +type CytoscapeElement = { + data: { + id: string; + label: string; + type?: string; + source?: string; + target?: string; + degree?: number; + }; + group?: "nodes" | "edges"; +}; + +interface KnowledgeGraphViewProps { + nodes: KnowledgeGraphNode[]; + edges: KnowledgeGraphEdge[]; + selectedId: string | null; + onSelectId: (id: string | null) => void; + isLoading?: boolean; +} + +const isDocNode = (node: KnowledgeGraphNode): boolean => { + const nodeType = 
node.node_type; + if (nodeType === "doc_deprecated" || nodeType === "doc_trajectory") { + return false; + } + return nodeType === "doc" || nodeType.startsWith("doc_"); +}; +const NODE_COLORS: Record<string, string> = { + code: "#3B82F6", + decision: "#8B5CF6", + preference: "#10B981", + pattern: "#F59E0B", + lesson: "#06B6D4", + default: "#8B5CF6", +}; + +export function KnowledgeGraphView({ + nodes, + edges, + selectedId, + onSelectId, + isLoading = false, +}: KnowledgeGraphViewProps) { + const cyRef = useRef<Cytoscape.Core | null>(null); + const layoutRef = useRef<Cytoscape.Layouts | null>(null); + const containerRef = useRef<HTMLDivElement | null>(null); + const [cyReady, setCyReady] = useState(false); + const cyReadyRef = useRef(false); + + const filteredNodes = useMemo(() => { + return nodes.filter((node) => isDocNode(node)); + }, [nodes]); + + const filteredEdges = useMemo(() => { + const nodeIds = new Set(filteredNodes.map((n) => n.id)); + return edges.filter( + (edge) => nodeIds.has(edge.source) && nodeIds.has(edge.target), + ); + }, [filteredNodes, edges]); + + const degreeMap = useMemo(() => { + const map = new Map<string, number>(); + filteredEdges.forEach((edge) => { + map.set(edge.source, (map.get(edge.source) ?? 0) + 1); + map.set(edge.target, (map.get(edge.target) ?? 0) + 1); + }); + filteredNodes.forEach((node) => { + if (!map.has(node.id)) map.set(node.id, 1); + }); + return map; + }, [filteredEdges, filteredNodes]); + + const elements: CytoscapeElement[] = useMemo(() => { + return [ + ...filteredNodes.map((node) => ({ + data: { + id: node.id, + label: node.label, + type: node.kind ?? "default", + degree: degreeMap.get(node.id) ?? 1, + }, + group: "nodes" as const, + })), + ...filteredEdges.map((edge) => ({ + data: { + id: `${edge.source}-${edge.target}-${edge.edge_type}`, + source: edge.source, + target: edge.target, + label: edge.edge_type, + }, + group: "edges" as const, + })), + ]; + }, [filteredNodes, filteredEdges, degreeMap]); + + const stylesheet: unknown[] = useMemo(() => { + return [ + { + selector: "node", + style: { + "background-color": "#8B5CF6", + label: "", + "font-size": "12px", + color: "#ffffff", + "text-valign": "center", + "text-halign": "center", + width: "mapData(degree, 1, 20, 30, 60)", + height: "mapData(degree, 1, 20, 30, 60)", + "text-wrap": "wrap", + "text-max-width": "80px", + }, + }, + ...Object.entries(NODE_COLORS) + .filter(([type]) => type !== "default") + .map(([type, color]) => ({ + selector: `node[type="${type}"]`, + style: { + "background-color": color, + }, + })), + { + selector: "edge", + style: { + width: 1, + "line-color": "#9CA3AF", + "target-arrow-color": "#9CA3AF", + "target-arrow-shape": "triangle", + "curve-style": "bezier", + opacity: 0.4, + }, + }, + { + selector: "node:selected", + style: { + "border-width": 5, + "border-color": "#FFFFFF", + "border-opacity": 1, + width: "mapData(degree, 1, 20, 40, 80)", + height: "mapData(degree, 1, 20, 40, 80)", + "background-color": "#A78BFA", + "box-shadow": "0 0 20px #8B5CF6", + "z-index": 999, + }, + }, + ]; + }, []); + + const handleNodeClick = useCallback( + (nodeId: string) => { + onSelectId(nodeId); + }, + [onSelectId], + ); + + const handleBackgroundClick = useCallback(() => { + onSelectId(null); + }, [onSelectId]); + + useEffect(() => { + if (!cyRef.current || !cyReady) return; + + const handleZoom = () => { + if (!cyRef.current) return; + const zoom = cyRef.current.zoom(); + cyRef.current.elements("node").forEach((node) => { + const label = zoom > 1.2 ? 
(node.data("label") as string) : ""; + node.style("label", label); + }); + }; + + cyRef.current.on("tap", "node", (e: Cytoscape.EventObject) => { + handleNodeClick((e.target as Cytoscape.NodeSingular).id()); + }); + + cyRef.current.on("tap", (e: Cytoscape.EventObject) => { + if (e.target === cyRef.current) { + handleBackgroundClick(); + } + }); + + cyRef.current.on("zoom", handleZoom); + + cyRef.current.on("mouseover", "node", (e: Cytoscape.EventObject) => { + (e.target as Cytoscape.NodeSingular).style( + "label", + (e.target as Cytoscape.NodeSingular).data("label") as string, + ); + }); + + cyRef.current.on("mouseout", "node", (e: Cytoscape.EventObject) => { + const zoom = cyRef.current?.zoom() ?? 1; + if (zoom <= 1.2) { + (e.target as Cytoscape.NodeSingular).style("label", ""); + } + }); + + return () => { + if (cyRef.current) { + cyRef.current.off("tap"); + cyRef.current.off("zoom"); + cyRef.current.off("mouseover"); + cyRef.current.off("mouseout"); + } + }; + }, [cyReady, handleNodeClick, handleBackgroundClick]); + + useEffect(() => { + if (!cyReady || !containerRef.current || !cyRef.current) return; + if (typeof ResizeObserver === "undefined") return; + + const ro = new ResizeObserver(() => { + cyRef.current?.resize(); + }); + ro.observe(containerRef.current); + return () => ro.disconnect(); + }, [cyReady]); + + useEffect(() => { + if (!cyRef.current || !cyReady) return; + + if (layoutRef.current) { + layoutRef.current.stop(); + } + + const layoutOpts: Cytoscape.LayoutOptions & Record<string, unknown> = { + name: "fcose", + animationDuration: 500, + randomize: true, + randomSeed: 42, + idealEdgeLength: 220, + nodeRepulsion: 18000, + nodeSeparation: 90, + edgeElasticity: 0.35, + gravity: 0.15, + packComponents: true, + componentSpacing: 140, + padding: 30, + }; + + layoutRef.current = cyRef.current.layout(layoutOpts); + + requestAnimationFrame(() => { + cyRef.current?.resize(); + if (layoutRef.current) { + layoutRef.current.run(); + } + }); + }, [cyReady, elements]); + + useEffect(() => { + if (!cyRef.current || !cyReady) return; + + cyRef.current.$("node:selected").unselect(); + + if (selectedId) { + const node = cyRef.current.$id(selectedId); + if (node.length > 0) { + node.select(); + + const currentZoom = cyRef.current.zoom(); + const targetZoom = Math.max(currentZoom, 1.5); + + cyRef.current.animate({ + center: { eles: node }, + zoom: targetZoom, + duration: 400, + easing: "ease-out", + }); + } + } + }, [cyReady, selectedId]); + + if (isLoading) { + return ( + <Flex align="center" justify="center" height="100%"> + <Text>Loading graph...</Text> + </Flex> + ); + } + + if (filteredNodes.length === 0) { + return ( + <div className={styles.emptyState}> + <div className={styles.emptyStateIcon}>🔍</div> + <div className={styles.emptyStateText}> + <p>No linked memories</p> + </div> + </div> + ); + } + + return ( + <div + ref={containerRef} + style={{ + width: "100%", + height: "100%", + display: "flex", + overflow: "hidden", + }} + > + <CytoscapeComponent + elements={elements} + style={{ width: "100%", height: "100%" }} + stylesheet={stylesheet as Cytoscape.StylesheetStyle[]} + cy={(cy) => { + cyRef.current = cy; + if (!cyReadyRef.current) { + cyReadyRef.current = true; + setCyReady(true); + cy.resize(); + } + }} + /> + </div> + ); +} diff --git a/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.module.css b/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.module.css new file mode 100644 index 000000000..612b5a2f5 --- /dev/null +++ 
b/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.module.css @@ -0,0 +1,49 @@ +.workspace { + display: flex; + flex-direction: column; + height: 100%; + gap: var(--space-3); + padding: var(--space-3); + background: var(--color-surface); + box-sizing: border-box; + min-height: 0; + overflow: hidden; +} + +.editorSection { + flex: 1; + min-height: 0; + overflow: hidden; + border: 1px solid var(--gray-a7); + border-radius: var(--radius-2); +} + +.listSection { + flex: 0 0 30%; + min-height: 0; + overflow: hidden; + border: 1px solid var(--gray-a7); + border-radius: var(--radius-2); + background: var(--color-panel); +} + +.graphSection { + flex: 1; + min-height: 0; + overflow: hidden; + border: 1px solid var(--gray-a7); + border-radius: var(--radius-2); + background: var(--color-panel); + display: flex; + flex-direction: column; +} + +.error { + display: flex; + align-items: center; + justify-content: center; + height: 100%; + color: var(--red-11); + padding: var(--space-4); + text-align: center; +} diff --git a/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.test.tsx b/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.test.tsx new file mode 100644 index 000000000..29d360224 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.test.tsx @@ -0,0 +1,345 @@ +import { describe, it, expect, vi, beforeEach } from "vitest"; +import { render, screen } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { KnowledgeWorkspace } from "./KnowledgeWorkspace"; +import type { KnowledgeGraphResponse } from "../../services/refact/types"; + +const mockGraphData: KnowledgeGraphResponse = { + nodes: [ + { + id: "doc1", + node_type: "doc_code", + label: "Code Memory 1", + title: "Code Memory 1", + content: "This is code memory content", + tags: ["rust", "backend"], + created: "2024-01-10T10:00:00Z", + file_path: "/path/to/memory1.md", + kind: "code", + }, + { + id: "doc2", + node_type: "doc_decision", + label: "Decision Memory 2", + title: "Decision Memory 2", + content: "This is decision memory content", + tags: ["architecture"], + created: "2024-01-09T10:00:00Z", + file_path: "/path/to/memory2.md", + kind: "decision", + }, + { + id: "doc3", + node_type: "doc_preference", + label: "Preference Memory 3", + title: "Preference Memory 3", + content: "This is preference memory content", + tags: ["style"], + created: "2024-01-08T10:00:00Z", + file_path: "/path/to/memory3.md", + kind: "preference", + }, + { id: "doc4", node_type: "doc_deprecated", label: "Deprecated Memory" }, + { id: "doc5", node_type: "doc_trajectory", label: "Trajectory Memory" }, + { id: "tag1", node_type: "tag", label: "Tag Node" }, + ], + edges: [ + { source: "doc1", target: "doc2", edge_type: "relates_to" }, + { source: "doc2", target: "doc3", edge_type: "relates_to" }, + { source: "doc1", target: "tag1", edge_type: "tagged_with" }, + ], + stats: { + doc_count: 5, + tag_count: 1, + file_count: 0, + entity_count: 0, + edge_count: 3, + active_docs: 3, + deprecated_docs: 1, + trajectory_count: 1, + }, +}; + +let mockGraphResponse: KnowledgeGraphResponse | null = mockGraphData; +let mockIsLoading = false; +let mockError: { message: string } | null = null; + +vi.mock("../../services/refact/knowledgeGraphApi", () => ({ + useGetKnowledgeGraphQuery: () => ({ + data: mockGraphResponse, + isLoading: mockIsLoading, + error: mockError, + }), + useUpdateMemoryMutation: () => [vi.fn(), { isLoading: false }], + useDeleteMemoryMutation: () => [vi.fn()], +})); + 
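+// NOTE: the knowledgeGraphApi hooks above are mocked through the module-level
+// mockGraphResponse / mockIsLoading / mockError variables; beforeEach() in the
+// describe block below resets them to defaults so each test can override the
+// simulated API response independently.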
+interface MockMemory { + memid: string; + title: string; +} + +interface MockNode { + id: string; + label: string; +} + +interface MockMemoryListProps { + memories: MockMemory[]; + selectedId: string | null; + onSelectId: (id: string) => void; + linkedIds: Set<string>; +} + +interface MockEdge { + source: string; + target: string; + edge_type: string; +} + +interface MockGraphViewProps { + nodes: MockNode[]; + edges: MockEdge[]; + onSelectId: (id: string) => void; + isLoading: boolean; +} + +interface MockDetailsEditorProps { + memory: { title: string } | null; + onMemoryDeleted: () => void; +} + +vi.mock("./MemoryListView", () => ({ + MemoryListView: ({ + memories, + selectedId, + onSelectId, + linkedIds, + }: MockMemoryListProps) => ( + <div data-testid="memory-list"> + <div>Memories: {memories.length}</div> + <div>Selected: {selectedId ?? "none"}</div> + <div>Linked: {linkedIds.size}</div> + {memories.map((m) => ( + <button key={m.memid} onClick={() => onSelectId(m.memid)}> + {m.title} + </button> + ))} + </div> + ), +})); + +vi.mock("./KnowledgeGraphView", () => ({ + KnowledgeGraphView: ({ + nodes, + edges, + onSelectId, + isLoading, + }: MockGraphViewProps) => ( + <div data-testid="graph-view"> + <div>Nodes: {nodes.length}</div> + <div>Edges: {edges.length}</div> + <div>Loading: {isLoading ? "yes" : "no"}</div> + {nodes.map((n) => ( + <button key={n.id} onClick={() => onSelectId(n.id)}> + {n.label} + </button> + ))} + </div> + ), +})); + +vi.mock("./MemoryDetailsEditor", () => ({ + MemoryDetailsEditor: ({ + memory, + onMemoryDeleted, + }: MockDetailsEditorProps) => ( + <div data-testid="details-editor"> + <div>Memory: {memory ? memory.title : "none"}</div> + <button onClick={onMemoryDeleted}>Delete</button> + </div> + ), +})); + +describe("KnowledgeWorkspace", () => { + beforeEach(() => { + mockGraphResponse = mockGraphData; + mockIsLoading = false; + mockError = null; + }); + + it("renders all three panels", () => { + render(<KnowledgeWorkspace />); + + expect(screen.getByTestId("memory-list")).toBeInTheDocument(); + expect(screen.getByTestId("graph-view")).toBeInTheDocument(); + expect(screen.getByTestId("details-editor")).toBeInTheDocument(); + }); + + it("filters out deprecated and trajectory nodes", () => { + render(<KnowledgeWorkspace />); + + expect(screen.getByText("Memories: 3")).toBeInTheDocument(); + expect(screen.queryByText("Deprecated Memory")).not.toBeInTheDocument(); + expect(screen.queryByText("Trajectory Memory")).not.toBeInTheDocument(); + }); + + it("computes linked IDs correctly", () => { + render(<KnowledgeWorkspace />); + + expect(screen.getByText("Linked: 3")).toBeInTheDocument(); + }); + + it("shows only linked nodes in graph", () => { + render(<KnowledgeWorkspace />); + + const graphView = screen.getByTestId("graph-view"); + expect(graphView).toHaveTextContent("Nodes: 3"); + expect(graphView).toHaveTextContent("Edges: 2"); + }); + + it("syncs selection between list and graph", async () => { + const user = userEvent.setup(); + render(<KnowledgeWorkspace />); + + const listButton = screen.getAllByRole("button", { + name: /Code Memory 1/i, + })[0]; + await user.click(listButton); + + expect(screen.getByText("Selected: doc1")).toBeInTheDocument(); + expect(screen.getByText("Memory: Code Memory 1")).toBeInTheDocument(); + }); + + it("updates editor when selection changes", async () => { + const user = userEvent.setup(); + render(<KnowledgeWorkspace />); + + const button1 = screen.getAllByRole("button", { + name: /Code Memory 1/i, + })[0]; + await 
user.click(button1); + expect(screen.getByText("Memory: Code Memory 1")).toBeInTheDocument(); + + const button2 = screen.getAllByRole("button", { + name: /Decision Memory 2/i, + })[0]; + await user.click(button2); + expect(screen.getByText("Memory: Decision Memory 2")).toBeInTheDocument(); + }); + + it("clears selection when memory is deleted", async () => { + const user = userEvent.setup(); + render(<KnowledgeWorkspace />); + + const selectButton = screen.getAllByRole("button", { + name: /Code Memory 1/i, + })[0]; + await user.click(selectButton); + expect(screen.getByText("Memory: Code Memory 1")).toBeInTheDocument(); + + const deleteButton = screen.getByRole("button", { name: /Delete/i }); + await user.click(deleteButton); + + expect(screen.getByText("Memory: none")).toBeInTheDocument(); + expect(screen.getByText("Selected: none")).toBeInTheDocument(); + }); + + it("shows error state when graph fails to load", () => { + mockError = { message: "Failed to fetch" }; + render(<KnowledgeWorkspace />); + + expect( + screen.getByText("Failed to load knowledge graph"), + ).toBeInTheDocument(); + }); + + it("handles empty graph data", () => { + mockGraphResponse = { + nodes: [], + edges: [], + stats: { + doc_count: 0, + tag_count: 0, + file_count: 0, + entity_count: 0, + edge_count: 0, + active_docs: 0, + deprecated_docs: 0, + trajectory_count: 0, + }, + }; + render(<KnowledgeWorkspace />); + + expect(screen.getByText("Memories: 0")).toBeInTheDocument(); + expect(screen.getByText("Nodes: 0")).toBeInTheDocument(); + expect(screen.getByText("Edges: 0")).toBeInTheDocument(); + }); + + it("converts graph nodes to memory records", () => { + render(<KnowledgeWorkspace />); + + expect(screen.getAllByText("Code Memory 1").length).toBeGreaterThan(0); + expect(screen.getAllByText("Decision Memory 2").length).toBeGreaterThan(0); + expect(screen.getAllByText("Preference Memory 3").length).toBeGreaterThan( + 0, + ); + }); + + it("populates memory records with full data from graph nodes", async () => { + const user = userEvent.setup(); + render(<KnowledgeWorkspace />); + + const button = screen.getAllByRole("button", { name: /Code Memory 1/i })[0]; + await user.click(button); + + expect(screen.getByText("Memory: Code Memory 1")).toBeInTheDocument(); + }); + + it('includes plain "doc" node type (without underscore)', () => { + mockGraphResponse = { + nodes: [ + { + id: "doc1", + node_type: "doc", + label: "Plain Doc Memory", + title: "Plain Doc Memory", + content: "This is a plain doc memory", + tags: ["test"], + created: "2024-01-10T10:00:00Z", + file_path: "/path/to/plain.md", + kind: "code", + }, + { + id: "doc2", + node_type: "doc_code", + label: "Code Memory", + title: "Code Memory", + content: "This is code memory", + tags: ["test"], + created: "2024-01-10T10:00:00Z", + file_path: "/path/to/code.md", + kind: "code", + }, + ], + edges: [{ source: "doc1", target: "doc2", edge_type: "relates_to" }], + stats: { + doc_count: 2, + tag_count: 0, + file_count: 0, + entity_count: 0, + edge_count: 1, + active_docs: 2, + deprecated_docs: 0, + trajectory_count: 0, + }, + }; + + render(<KnowledgeWorkspace />); + + // Both memories should be visible + expect(screen.getByText("Memories: 2")).toBeInTheDocument(); + expect(screen.getAllByText("Plain Doc Memory").length).toBeGreaterThan(0); + expect(screen.getAllByText("Code Memory").length).toBeGreaterThan(0); + }); +}); diff --git a/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.tsx b/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.tsx new file 
mode 100644 index 000000000..b8fe5b7ce --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/KnowledgeWorkspace.tsx @@ -0,0 +1,127 @@ +import { useState, useMemo } from "react"; +import { useGetKnowledgeGraphQuery } from "../../services/refact/knowledgeGraphApi"; +import { MemoryListView } from "./MemoryListView"; +import { KnowledgeGraphView } from "./KnowledgeGraphView"; +import { MemoryDetailsEditor } from "./MemoryDetailsEditor"; +import type { KnowledgeMemoRecord } from "../../services/refact/types"; +import styles from "./KnowledgeWorkspace.module.css"; + +export function KnowledgeWorkspace() { + const { + data: graph, + isLoading, + error, + } = useGetKnowledgeGraphQuery({ includeContent: true }); + const [selectedId, setSelectedId] = useState<string | null>(null); + + const allDocNodes = useMemo(() => { + if (!graph) return []; + return graph.nodes.filter((node) => { + const isDocNode = + node.node_type === "doc" || node.node_type.startsWith("doc_"); + if (!isDocNode) return false; + + const kind = node.node_type.replace("doc_", "").toLowerCase(); + return ( + kind !== "deprecated" && kind !== "archived" && kind !== "trajectory" + ); + }); + }, [graph]); + + const docDocEdges = useMemo(() => { + if (!graph) return []; + const docIds = new Set(allDocNodes.map((n) => n.id)); + return graph.edges.filter( + (edge) => docIds.has(edge.source) && docIds.has(edge.target), + ); + }, [graph, allDocNodes]); + + const linkedIds = useMemo(() => { + const ids = new Set<string>(); + docDocEdges.forEach((e) => { + ids.add(e.source); + ids.add(e.target); + }); + return ids; + }, [docDocEdges]); + + const linkedDocNodes = useMemo( + () => allDocNodes.filter((n) => linkedIds.has(n.id)), + [allDocNodes, linkedIds], + ); + + const memoryRecords = useMemo((): KnowledgeMemoRecord[] => { + return allDocNodes.map((node) => ({ + memid: node.id, + tags: node.tags ?? [], + content: node.content ?? "", + title: node.title ?? node.label, + kind: node.kind ?? node.node_type.replace("doc_", ""), + file_path: node.file_path, + created: node.created, + })); + }, [allDocNodes]); + + const selectedMemory = useMemo((): KnowledgeMemoRecord | null => { + if (!selectedId) return null; + const node = allDocNodes.find((n) => n.id === selectedId); + if (!node) return null; + return { + memid: node.id, + tags: node.tags ?? [], + content: node.content ?? "", + title: node.title ?? node.label, + kind: node.kind ?? 
node.node_type.replace("doc_", ""), + file_path: node.file_path, + created: node.created, + }; + }, [selectedId, allDocNodes]); + + const handleSelectMemory = (id: string | null) => { + setSelectedId(id); + }; + + const handleMemoryDeleted = () => { + setSelectedId(null); + }; + + if (error) { + return ( + <div className={styles.workspace}> + <div className={styles.error}> + <p>Failed to load knowledge graph</p> + </div> + </div> + ); + } + + return ( + <div className={styles.workspace}> + <div className={styles.editorSection}> + <MemoryDetailsEditor + memory={selectedMemory} + onMemoryDeleted={handleMemoryDeleted} + /> + </div> + + <div className={styles.listSection}> + <MemoryListView + memories={memoryRecords} + selectedId={selectedId} + onSelectId={handleSelectMemory} + linkedIds={linkedIds} + /> + </div> + + <div className={styles.graphSection}> + <KnowledgeGraphView + nodes={linkedDocNodes} + edges={docDocEdges} + selectedId={selectedId} + onSelectId={handleSelectMemory} + isLoading={isLoading} + /> + </div> + </div> + ); +} diff --git a/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.module.css b/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.module.css new file mode 100644 index 000000000..2f07c6cd5 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.module.css @@ -0,0 +1,136 @@ +.container { + display: flex; + flex-direction: column; + background-color: var(--color-panel); + height: 100%; + overflow: hidden; +} + +.scrollArea { + composes: scrollbarThin from "../../components/shared/scrollbar.module.css"; + flex: 1; + overflow-y: auto; + padding: var(--space-3); + padding-right: var(--space-4); + display: flex; + flex-direction: column; + gap: var(--space-3); +} + +.emptyState { + color: var(--gray-10); + text-align: center; + padding: var(--space-4); +} + +.field { + display: flex; + flex-direction: column; + gap: var(--space-2); +} + +.label { + color: var(--gray-10); + font-weight: 500; + font-size: 11px; + text-transform: uppercase; + letter-spacing: 0.5px; + display: flex; + align-items: center; + gap: var(--space-1); +} + +.dirtyIndicator { + color: var(--accent-9); + font-size: 14px; +} + +.input { + border: 1px solid var(--gray-a7); + border-radius: var(--radius-1); + padding: var(--space-2); + font-size: 13px; + background-color: var(--color-surface); + color: var(--gray-12); +} + +.input:focus { + outline: none; + border-color: var(--accent-8); +} + +.readOnlyValue { + padding: var(--space-2); + background-color: var(--gray-a2); + border: 1px solid var(--gray-a6); + border-radius: var(--radius-1); + font-size: 13px; + color: var(--gray-11); + font-family: var(--font-mono); +} + +.warning { + color: var(--yellow-11); + font-family: var(--default-font-family); +} + +.tagsContainer { + display: flex; + flex-wrap: wrap; + gap: var(--space-1); +} + +.tag { + display: inline-flex; + align-items: center; + gap: var(--space-1); + padding: calc(var(--space-1) * 0.75) var(--space-2); + background-color: var(--accent-3); + color: var(--accent-11); + border-radius: var(--radius-1); + font-size: 12px; + font-weight: 500; +} + +.tagRemove { + background: none; + border: none; + color: var(--accent-11); + cursor: pointer; + padding: 0; + font-size: 16px; + line-height: 1; + opacity: 0.7; + transition: opacity 150ms ease; +} + +.tagRemove:hover { + opacity: 1; +} + +.textarea { + min-height: 300px; + padding: var(--space-2); + border: 1px solid var(--gray-a7); + border-radius: var(--radius-1); + font-family: var(--font-mono); + 
font-size: 13px; + line-height: 1.5; + background-color: var(--color-surface); + color: var(--gray-12); + resize: vertical; +} + +.textarea:focus { + outline: none; + border-color: var(--accent-8); +} + +.actions { + display: flex; + gap: var(--space-2); + padding: var(--space-3); + border-top: 1px solid var(--gray-a6); + background-color: var(--color-panel); + flex-shrink: 0; +} diff --git a/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.test.tsx b/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.test.tsx new file mode 100644 index 000000000..2bd6f9db6 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.test.tsx @@ -0,0 +1,226 @@ +import { describe, it, expect, beforeEach } from "vitest"; +import { render, screen, fireEvent } from "@testing-library/react"; +import { Provider } from "react-redux"; +import { configureStore } from "@reduxjs/toolkit"; +import { MemoryDetailsEditor } from "./MemoryDetailsEditor"; +import { knowledgeGraphApi } from "../../services/refact/knowledgeGraphApi"; +import type { KnowledgeMemoRecord } from "../../services/refact/types"; + +const mockMemory: KnowledgeMemoRecord = { + memid: "test-123", + title: "Test Memory", + content: "Test content", + tags: ["tag1", "tag2"], + kind: "code", + file_path: "/path/to/memory.md", + created: "2024-01-01", +}; + +const createMockStore = () => { + return configureStore({ + reducer: { + [knowledgeGraphApi.reducerPath]: knowledgeGraphApi.reducer, + config: (state = { lspPort: 8001, apiKey: "" }) => state, + }, + middleware: (getDefaultMiddleware) => + getDefaultMiddleware().concat(knowledgeGraphApi.middleware), + }); +}; + +describe("MemoryDetailsEditor", () => { + let store: ReturnType<typeof createMockStore>; + + beforeEach(() => { + store = createMockStore(); + }); + + it('renders "No memory selected" when memory is null', () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={null} /> + </Provider>, + ); + + expect(screen.getByText("No memory selected")).toBeInTheDocument(); + }); + + it("displays all memory fields when memory is provided", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + expect(screen.getByDisplayValue("Test Memory")).toBeInTheDocument(); + expect(screen.getByDisplayValue("Test content")).toBeInTheDocument(); + expect(screen.getByText("tag1")).toBeInTheDocument(); + expect(screen.getByText("tag2")).toBeInTheDocument(); + expect(screen.getByText("code")).toBeInTheDocument(); + expect(screen.getByText("2024-01-01")).toBeInTheDocument(); + expect(screen.getByText("/path/to/memory.md")).toBeInTheDocument(); + }); + + it("sets isDirty to true when title is edited", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const titleInput = screen.getByDisplayValue("Test Memory"); + fireEvent.change(titleInput, { target: { value: "Updated Title" } }); + + expect(screen.getByText("●")).toBeInTheDocument(); + }); + + it("disables save button when not dirty", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const saveButton = screen.getByRole("button", { name: /save/i }); + expect(saveButton).toBeDisabled(); + }); + + it("enables save button when dirty", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const titleInput = screen.getByDisplayValue("Test Memory"); + fireEvent.change(titleInput, 
{ target: { value: "Updated Title" } }); + + const saveButton = screen.getByRole("button", { name: /save/i }); + expect(saveButton).not.toBeDisabled(); + }); + + it("parses tags correctly on blur", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const tagsInput = screen.getByPlaceholderText("comma, separated, tags"); + fireEvent.change(tagsInput, { target: { value: "new1, new2, new3" } }); + fireEvent.blur(tagsInput); + + expect(screen.getByText("new1")).toBeInTheDocument(); + expect(screen.getByText("new2")).toBeInTheDocument(); + expect(screen.getByText("new3")).toBeInTheDocument(); + }); + + it("removes tag when X is clicked", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const removeButton = screen.getAllByLabelText(/remove/i)[0]; + fireEvent.click(removeButton); + + expect(screen.queryByText("tag1")).not.toBeInTheDocument(); + expect(screen.getByText("tag2")).toBeInTheDocument(); + }); + + it("shows delete confirmation dialog when delete is clicked", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const deleteButton = screen.getByRole("button", { name: /delete/i }); + fireEvent.click(deleteButton); + + expect(screen.getByText("Delete Memory")).toBeInTheDocument(); + expect( + screen.getByRole("button", { name: /archive/i }), + ).toBeInTheDocument(); + expect( + screen.getByRole("button", { name: /permanently delete/i }), + ).toBeInTheDocument(); + }); + + it("disables save and delete when file_path is missing", () => { + const memoryWithoutPath = { ...mockMemory, file_path: undefined }; + + render( + <Provider store={store}> + <MemoryDetailsEditor memory={memoryWithoutPath} /> + </Provider>, + ); + + expect(screen.getByText(/no file path/i)).toBeInTheDocument(); + + const titleInput = screen.getByDisplayValue("Test Memory"); + fireEvent.change(titleInput, { target: { value: "Updated" } }); + + const saveButton = screen.getByRole("button", { name: /save/i }); + const deleteButton = screen.getByRole("button", { name: /delete/i }); + + expect(saveButton).toBeDisabled(); + expect(deleteButton).toBeDisabled(); + }); + + it("resets draft when memory changes", () => { + const { rerender } = render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const titleInput = screen.getByDisplayValue("Test Memory"); + fireEvent.change(titleInput, { target: { value: "Updated Title" } }); + + expect(screen.getByText("●")).toBeInTheDocument(); + + const newMemory = { + ...mockMemory, + memid: "new-id", + file_path: "/new/path.md", + }; + rerender( + <Provider store={store}> + <MemoryDetailsEditor memory={newMemory} /> + </Provider>, + ); + + expect(screen.queryByText("●")).not.toBeInTheDocument(); + }); + + it("deduplicates tags", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const tagsInput = screen.getByPlaceholderText("comma, separated, tags"); + fireEvent.change(tagsInput, { target: { value: "tag1, tag1, tag2" } }); + fireEvent.blur(tagsInput); + + const tag1Elements = screen.getAllByText("tag1"); + expect(tag1Elements).toHaveLength(1); + }); + + it("trims and filters empty tags", () => { + render( + <Provider store={store}> + <MemoryDetailsEditor memory={mockMemory} /> + </Provider>, + ); + + const tagsInput = screen.getByPlaceholderText("comma, separated, tags"); + 
fireEvent.change(tagsInput, { target: { value: " tag1 , , tag2 " } }); + fireEvent.blur(tagsInput); + + expect(screen.getByText("tag1")).toBeInTheDocument(); + expect(screen.getByText("tag2")).toBeInTheDocument(); + }); +}); diff --git a/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.tsx b/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.tsx new file mode 100644 index 000000000..2c082ea17 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/MemoryDetailsEditor.tsx @@ -0,0 +1,284 @@ +import { useState, useEffect } from "react"; +import { Button, Dialog, Flex, TextField } from "@radix-ui/themes"; +import type { KnowledgeMemoRecord } from "../../services/refact/types"; +import { + useUpdateMemoryMutation, + useDeleteMemoryMutation, +} from "../../services/refact/knowledgeGraphApi"; +import styles from "./MemoryDetailsEditor.module.css"; + +interface MemoryDetailsEditorProps { + memory: KnowledgeMemoRecord | null; + onMemoryUpdated?: () => void; + onMemoryDeleted?: () => void; +} + +interface DraftMemory { + title: string; + content: string; + tags: string[]; + kind: string; +} + +export function MemoryDetailsEditor({ + memory, + onMemoryUpdated, + onMemoryDeleted, +}: MemoryDetailsEditorProps) { + const [draft, setDraft] = useState<DraftMemory>({ + title: "", + content: "", + tags: [], + kind: "code", + }); + const [isDirty, setIsDirty] = useState(false); + const [isDeleteOpen, setIsDeleteOpen] = useState(false); + const [showDiscardDialog, setShowDiscardDialog] = useState(false); + const [tagsInput, setTagsInput] = useState(""); + + const [updateMemory, { isLoading: isSaving }] = useUpdateMemoryMutation(); + const [deleteMemory] = useDeleteMemoryMutation(); + + useEffect(() => { + if (!memory) { + setDraft({ title: "", content: "", tags: [], kind: "code" }); + setIsDirty(false); + setTagsInput(""); + } else { + setDraft({ + title: memory.title ?? "", + content: memory.content, + tags: memory.tags, + kind: memory.kind ?? 
"code", + }); + setIsDirty(false); + setTagsInput(memory.tags.join(", ")); + } + }, [memory]); + + const handleFieldChange = ( + field: keyof DraftMemory, + value: string | string[], + ) => { + setDraft((prev) => ({ ...prev, [field]: value })); + setIsDirty(true); + }; + + const parseTags = (input: string): string[] => { + return input + .split(/[,\n]/) + .map((tag) => tag.trim()) + .filter((tag) => tag.length > 0) + .filter((tag, index, self) => self.indexOf(tag) === index); + }; + + const handleTagsBlur = () => { + const parsed = parseTags(tagsInput); + handleFieldChange("tags", parsed); + }; + + const handleRemoveTag = (tagToRemove: string) => { + const newTags = draft.tags.filter((tag) => tag !== tagToRemove); + handleFieldChange("tags", newTags); + setTagsInput(newTags.join(", ")); + }; + + const handleSave = () => { + if (!memory?.file_path || !draft.title || !draft.content) return; + + void updateMemory({ + file_path: memory.file_path, + title: draft.title, + content: draft.content, + tags: draft.tags, + kind: draft.kind, + filenames: [memory.file_path], + }) + .unwrap() + .then(() => { + setIsDirty(false); + onMemoryUpdated?.(); + }) + .catch((_error: unknown) => { + // Error is handled by RTK Query + }); + }; + + const handleDelete = (archive: boolean) => { + if (!memory?.file_path) return; + + void deleteMemory({ + file_path: memory.file_path, + archive, + }) + .unwrap() + .then(() => { + setIsDeleteOpen(false); + onMemoryDeleted?.(); + }) + .catch((_error: unknown) => { + // Error is handled by RTK Query + }); + }; + + const handleDiscardChanges = () => { + setShowDiscardDialog(false); + setIsDirty(false); + }; + + if (!memory) { + return ( + <div className={styles.container}> + <p className={styles.emptyState}>No memory selected</p> + </div> + ); + } + + const canSave = memory.file_path && isDirty && draft.title && draft.content; + const canDelete = memory.file_path; + + return ( + <div className={styles.container}> + <div className={styles.scrollArea}> + <div className={styles.field}> + <label className={styles.label}> + TITLE {isDirty && <span className={styles.dirtyIndicator}>●</span>} + </label> + <TextField.Root + value={draft.title} + onChange={(e) => handleFieldChange("title", e.target.value)} + placeholder="Untitled" + className={styles.input} + /> + </div> + + <div className={styles.field}> + <label className={styles.label}>KIND</label> + <div className={styles.readOnlyValue}>{draft.kind}</div> + </div> + + <div className={styles.field}> + <label className={styles.label}>CREATED</label> + <div className={styles.readOnlyValue}>{memory.created ?? "—"}</div> + </div> + + <div className={styles.field}> + <label className={styles.label}>TAGS</label> + {draft.tags.length > 0 && ( + <div className={styles.tagsContainer}> + {draft.tags.map((tag) => ( + <span key={tag} className={styles.tag}> + {tag} + <button + className={styles.tagRemove} + onClick={() => handleRemoveTag(tag)} + aria-label={`Remove ${tag}`} + > + × + </button> + </span> + ))} + </div> + )} + <TextField.Root + value={tagsInput} + onChange={(e) => setTagsInput(e.target.value)} + onBlur={handleTagsBlur} + placeholder="comma, separated, tags" + className={styles.input} + /> + </div> + + <div className={styles.field}> + <label className={styles.label}>FILE PATH</label> + <div className={styles.readOnlyValue}> + {memory.file_path ?? 
( + <span className={styles.warning}> + ⚠️ This memory has no file path and cannot be edited + </span> + )} + </div> + </div> + + <div className={styles.field}> + <label className={styles.label}>CONTENT</label> + <textarea + value={draft.content} + onChange={(e) => handleFieldChange("content", e.target.value)} + className={styles.textarea} + placeholder="Memory content..." + /> + </div> + </div> + + <div className={styles.actions}> + <Button + onClick={handleSave} + disabled={!canSave || isSaving} + style={{ flex: 1 }} + > + {isSaving ? "Saving..." : "Save"} + </Button> + <Button + color="red" + variant="outline" + onClick={() => setIsDeleteOpen(true)} + disabled={!canDelete} + style={{ flex: 1 }} + > + Delete + </Button> + </div> + + {isDeleteOpen && ( + <Dialog.Root open={isDeleteOpen} onOpenChange={setIsDeleteOpen}> + <Dialog.Content> + <Dialog.Title>Delete Memory</Dialog.Title> + <Flex direction="column" gap="3"> + <p>What would you like to do?</p> + <Flex gap="2" justify="end"> + <Button + variant="outline" + onClick={() => setIsDeleteOpen(false)} + > + Cancel + </Button> + <Button color="yellow" onClick={() => handleDelete(true)}> + Archive + </Button> + <Button color="red" onClick={() => handleDelete(false)}> + Permanently Delete + </Button> + </Flex> + </Flex> + </Dialog.Content> + </Dialog.Root> + )} + + {showDiscardDialog && ( + <Dialog.Root + open={showDiscardDialog} + onOpenChange={setShowDiscardDialog} + > + <Dialog.Content> + <Dialog.Title>Unsaved Changes</Dialog.Title> + <Flex direction="column" gap="3"> + <p>You have unsaved changes. Discard them?</p> + <Flex gap="2" justify="end"> + <Button + variant="outline" + onClick={() => setShowDiscardDialog(false)} + > + Cancel + </Button> + <Button color="red" onClick={handleDiscardChanges}> + Discard + </Button> + </Flex> + </Flex> + </Dialog.Content> + </Dialog.Root> + )} + </div> + ); +} diff --git a/refact-agent/gui/src/features/Knowledge/MemoryListView.README.md b/refact-agent/gui/src/features/Knowledge/MemoryListView.README.md new file mode 100644 index 000000000..1cda8f51f --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/MemoryListView.README.md @@ -0,0 +1,80 @@ +# MemoryListView Component + +A card-based grid view for displaying knowledge memories with responsive layout and interactive selection. 
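+
+A minimal wiring sketch (an assumption about typical usage, not part of this component): derive the `linkedIds` prop from the knowledge-graph edges so that cards appearing on either end of an edge get the 🔗 indicator. `KnowledgeWorkspace` in this PR does essentially this, after first narrowing the edges to doc-to-doc links (that filtering step is omitted here for brevity):
+
+```tsx
+import { useMemo } from "react";
+// Assumed import path — adjust to your setup:
+import { useGetKnowledgeGraphQuery } from "../../services/refact/knowledgeGraphApi";
+
+// Collect the IDs that appear on either end of a graph edge.
+export function useLinkedMemoryIds(): Set<string> {
+  const { data: graph } = useGetKnowledgeGraphQuery({ includeContent: true });
+  return useMemo(() => {
+    const ids = new Set<string>();
+    (graph?.edges ?? []).forEach((edge) => {
+      ids.add(edge.source);
+      ids.add(edge.target);
+    });
+    return ids;
+  }, [graph]);
+}
+```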
+ +## Features + +- **Responsive Grid**: 2-3 column layout that adapts to screen size +- **Kind Badges**: Color-coded icons matching KnowledgeGraph colors +- **Selection State**: Visual highlight for selected cards +- **Link Indicators**: 🔗 badge for memories that appear in graph edges +- **Tag Display**: Shows up to 3 tag dots with "+N" overflow indicator +- **Empty State**: Friendly message when no memories available +- **Accessibility**: Keyboard-focusable cards with ARIA labels + +## Usage + +```tsx +import { MemoryListView } from "./features/Knowledge"; + +function MyComponent() { + const [selectedId, setSelectedId] = useState<string | null>(null); + const linkedIds = new Set(["mem-1", "mem-3"]); + + const filteredMemories = memories.filter( + (m) => m.kind !== "deprecated" && m.kind !== "trajectory", + ); + + return ( + <MemoryListView + memories={filteredMemories} + selectedId={selectedId} + onSelectId={setSelectedId} + linkedIds={linkedIds} + /> + ); +} +``` + +## Props + +| Prop | Type | Description | +| ------------ | ----------------------- | -------------------------------------------- | +| `memories` | `KnowledgeMemoRecord[]` | Array of memory records to display | +| `selectedId` | `string \| null` | ID of currently selected memory | +| `onSelectId` | `(id: string) => void` | Callback when card is clicked | +| `linkedIds` | `Set<string>` | Set of memory IDs that appear in graph edges | + +## Kind Colors + +Matches KnowledgeGraph.tsx colors: + +- 📄 **code** - Blue (#3B82F6) +- 🎯 **decision** - Purple (#8B5CF6) +- ⭐ **preference** - Green (#10B981) +- 🔄 **pattern** - Amber (#F59E0B) +- 📚 **lesson** - Cyan (#06B6D4) + +## Layout + +- **Mobile/Small**: 2 columns (min-width: 768px) +- **Desktop**: 3 columns (min-width: 1200px) +- **Card min-height**: 120px +- **Gap**: `var(--space-3)` (12px) + +## Styling + +Uses Radix design tokens exclusively: + +- Colors: `--color-panel`, `--gray-a7`, `--accent-9` +- Spacing: `--space-1` through `--space-4` +- Radius: `--radius-1`, `--radius-2` +- Transitions: 150ms ease + +## Accessibility + +- Cards are `<button>` elements for keyboard navigation +- `aria-pressed` indicates selection state +- `aria-label` on kind badges and link indicators +- Focus ring with `outline: 2px solid var(--accent-9)` +- Title tooltips on tag dots (via `title` attribute) diff --git a/refact-agent/gui/src/features/Knowledge/MemoryListView.module.css b/refact-agent/gui/src/features/Knowledge/MemoryListView.module.css new file mode 100644 index 000000000..e2997cd6b --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/MemoryListView.module.css @@ -0,0 +1,167 @@ +.container { + composes: scrollbarThin from "../../components/shared/scrollbar.module.css"; + padding: var(--space-3); + padding-right: var(--space-4); + background-color: var(--color-surface); + overflow-y: auto; + height: 100%; +} + +.grid { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(280px, 1fr)); + gap: var(--space-3); +} + +@media (min-width: 768px) { + .grid { + grid-template-columns: repeat(2, 1fr); + } +} + +@media (min-width: 1200px) { + .grid { + grid-template-columns: repeat(3, 1fr); + } +} + +.card { + display: flex; + flex-direction: column; + gap: var(--space-2); + padding: var(--space-3); + background-color: var(--color-panel); + border: 1px solid var(--gray-a7); + border-radius: var(--radius-2); + min-height: 120px; + cursor: pointer; + transition: all 150ms ease; + text-align: left; + width: 100%; +} + +.card:hover { + background-color: var(--gray-a2); + 
border-color: var(--gray-a8); + box-shadow: 0 2px 8px var(--gray-a4); +} + +.card:focus-visible { + outline: 2px solid var(--accent-9); + outline-offset: 2px; +} + +.card.selected { + border-color: var(--accent-9); + background-color: var(--accent-a2); + box-shadow: 0 0 0 1px var(--accent-9); +} + +.header { + display: flex; + justify-content: space-between; + align-items: flex-start; + gap: var(--space-2); +} + +.headerLeft { + display: flex; + align-items: center; + gap: var(--space-2); + flex: 1; + min-width: 0; +} + +.kindBadge { + flex-shrink: 0; + width: 24px; + height: 24px; + border-radius: var(--radius-1); + display: flex; + align-items: center; + justify-content: center; + font-size: 14px; +} + +.title { + flex: 1; + font-size: 14px; + font-weight: 600; + color: var(--gray-12); + overflow: hidden; + text-overflow: ellipsis; + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + line-height: 1.4; + word-break: break-word; +} + +.linkBadge { + flex-shrink: 0; + font-size: 14px; + opacity: 0.7; +} + +.metadata { + display: flex; + flex-direction: column; + gap: var(--space-1); +} + +.metaRow { + display: flex; + align-items: center; + gap: var(--space-1); + font-size: 12px; +} + +.metaLabel { + color: var(--gray-10); + font-weight: 500; +} + +.metaValue { + color: var(--gray-12); +} + +.tags { + display: flex; + align-items: center; + gap: 4px; + flex-wrap: wrap; +} + +.tagDot { + color: var(--accent-9); + font-size: 10px; + line-height: 1; +} + +.tagMore { + font-size: 11px; + color: var(--gray-10); + font-weight: 500; +} + +.emptyState { + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + min-height: 200px; + gap: var(--space-3); + padding: var(--space-4); + text-align: center; +} + +.emptyIcon { + font-size: 48px; + opacity: 0.3; +} + +.emptyText { + font-size: 14px; + color: var(--gray-10); + margin: 0; +} diff --git a/refact-agent/gui/src/features/Knowledge/MemoryListView.test.tsx b/refact-agent/gui/src/features/Knowledge/MemoryListView.test.tsx new file mode 100644 index 000000000..00373ccc6 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/MemoryListView.test.tsx @@ -0,0 +1,194 @@ +import { describe, it, expect, vi } from "vitest"; +import { render, screen } from "@testing-library/react"; +import userEvent from "@testing-library/user-event"; +import { MemoryListView } from "./MemoryListView"; +import type { KnowledgeMemoRecord } from "../../services/refact/types"; + +const mockMemories: KnowledgeMemoRecord[] = [ + { + memid: "mem-1", + tags: ["tag1", "tag2"], + content: "Test content 1", + title: "Test Memory 1", + kind: "code", + }, + { + memid: "mem-2", + tags: ["tag3", "tag4", "tag5", "tag6"], + content: "Test content 2", + title: "Test Memory 2", + kind: "decision", + }, + { + memid: "mem-3", + tags: [], + content: "Test content 3", + title: "Very Long Title That Should Be Truncated After Two Lines Of Text", + kind: "preference", + }, +]; + +describe("MemoryListView", () => { + it("renders empty state when no memories", () => { + render( + <MemoryListView + memories={[]} + selectedId={null} + onSelectId={vi.fn()} + linkedIds={new Set()} + />, + ); + + expect(screen.getByText("No memories to display")).toBeInTheDocument(); + }); + + it("renders card grid with memories", () => { + render( + <MemoryListView + memories={mockMemories} + selectedId={null} + onSelectId={vi.fn()} + linkedIds={new Set()} + />, + ); + + expect(screen.getByText("Test Memory 1")).toBeInTheDocument(); + 
expect(screen.getByText("Test Memory 2")).toBeInTheDocument(); + expect(screen.getByText(/Very Long Title/)).toBeInTheDocument(); + }); + + it("calls onSelectId when card is clicked", async () => { + const user = userEvent.setup(); + const onSelectId = vi.fn(); + + render( + <MemoryListView + memories={mockMemories} + selectedId={null} + onSelectId={onSelectId} + linkedIds={new Set()} + />, + ); + + const card = screen.getByText("Test Memory 1").closest("button"); + expect(card).toBeInTheDocument(); + + if (card) { + await user.click(card); + expect(onSelectId).toHaveBeenCalledWith("mem-1"); + } + }); + + it("highlights selected card", () => { + render( + <MemoryListView + memories={mockMemories} + selectedId="mem-2" + onSelectId={vi.fn()} + linkedIds={new Set()} + />, + ); + + const selectedCard = screen.getByText("Test Memory 2").closest("button"); + expect(selectedCard?.className).toContain("selected"); + }); + + it("shows link badge for linked memories", () => { + const linkedIds = new Set(["mem-1", "mem-3"]); + + render( + <MemoryListView + memories={mockMemories} + selectedId={null} + onSelectId={vi.fn()} + linkedIds={linkedIds} + />, + ); + + const linkBadges = screen.getAllByLabelText("Linked in graph"); + expect(linkBadges).toHaveLength(2); + }); + + it("displays kind badges with correct icons", () => { + render( + <MemoryListView + memories={mockMemories} + selectedId={null} + onSelectId={vi.fn()} + linkedIds={new Set()} + />, + ); + + expect(screen.getByLabelText("Kind: code")).toBeInTheDocument(); + expect(screen.getByLabelText("Kind: decision")).toBeInTheDocument(); + expect(screen.getByLabelText("Kind: preference")).toBeInTheDocument(); + }); + + it("shows tag dots and +N indicator", () => { + render( + <MemoryListView + memories={mockMemories} + selectedId={null} + onSelectId={vi.fn()} + linkedIds={new Set()} + />, + ); + + expect(screen.getByText("+1")).toBeInTheDocument(); + }); + + it("capitalizes kind in metadata", () => { + render( + <MemoryListView + memories={mockMemories} + selectedId={null} + onSelectId={vi.fn()} + linkedIds={new Set()} + />, + ); + + expect(screen.getByText("Code")).toBeInTheDocument(); + expect(screen.getByText("Decision")).toBeInTheDocument(); + expect(screen.getByText("Preference")).toBeInTheDocument(); + }); + + it("handles memory without title", () => { + const memoryWithoutTitle: KnowledgeMemoRecord = { + memid: "mem-4", + tags: [], + content: "Content", + kind: "code", + }; + + render( + <MemoryListView + memories={[memoryWithoutTitle]} + selectedId={null} + onSelectId={vi.fn()} + linkedIds={new Set()} + />, + ); + + expect(screen.getByText("Untitled")).toBeInTheDocument(); + }); + + it("handles memory without kind", () => { + const memoryWithoutKind: KnowledgeMemoRecord = { + memid: "mem-5", + tags: [], + content: "Content", + title: "Test", + }; + + render( + <MemoryListView + memories={[memoryWithoutKind]} + selectedId={null} + onSelectId={vi.fn()} + linkedIds={new Set()} + />, + ); + + expect(screen.getByText("Code")).toBeInTheDocument(); + }); +}); diff --git a/refact-agent/gui/src/features/Knowledge/MemoryListView.tsx b/refact-agent/gui/src/features/Knowledge/MemoryListView.tsx new file mode 100644 index 000000000..791df8f04 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/MemoryListView.tsx @@ -0,0 +1,137 @@ +import { useEffect, useRef } from "react"; +import type { KnowledgeMemoRecord } from "../../services/refact/types"; +import styles from "./MemoryListView.module.css"; + +interface MemoryListViewProps { + memories: 
KnowledgeMemoRecord[]; + selectedId: string | null; + onSelectId: (id: string) => void; + linkedIds: Set<string>; +} + +const KIND_CONFIG = { + code: { icon: "📄", color: "#3B82F6" }, + decision: { icon: "🎯", color: "#8B5CF6" }, + preference: { icon: "⭐", color: "#10B981" }, + pattern: { icon: "🔄", color: "#F59E0B" }, + lesson: { icon: "📚", color: "#06B6D4" }, +} as const; + +type KindKey = keyof typeof KIND_CONFIG; + +function getKindConfig(kind: string | undefined): { + icon: string; + color: string; +} { + if (kind && kind in KIND_CONFIG) { + return KIND_CONFIG[kind as KindKey]; + } + return KIND_CONFIG.code; +} + +export function MemoryListView({ + memories, + selectedId, + onSelectId, + linkedIds, +}: MemoryListViewProps) { + const cardRefs = useRef<Map<string, HTMLButtonElement>>(new Map()); + + useEffect(() => { + if (selectedId && cardRefs.current.has(selectedId)) { + const element = cardRefs.current.get(selectedId); + element?.scrollIntoView({ + behavior: "smooth", + block: "nearest", + }); + } + }, [selectedId]); + + if (memories.length === 0) { + return ( + <div className={styles.emptyState}> + <div className={styles.emptyIcon}>🔍</div> + <p className={styles.emptyText}>No memories to display</p> + </div> + ); + } + + return ( + <div className={styles.container}> + <div className={styles.grid}> + {memories.map((memory) => { + const isSelected = selectedId === memory.memid; + const isLinked = linkedIds.has(memory.memid); + const kind = memory.kind ?? "code"; + const kindConfig = getKindConfig(memory.kind); + + return ( + <button + key={memory.memid} + ref={(el) => { + if (el) { + cardRefs.current.set(memory.memid, el); + } else { + cardRefs.current.delete(memory.memid); + } + }} + className={`${styles.card} ${isSelected ? styles.selected : ""}`} + onClick={() => onSelectId(memory.memid)} + type="button" + aria-pressed={isSelected} + > + <div className={styles.header}> + <div className={styles.headerLeft}> + <span + className={styles.kindBadge} + style={{ backgroundColor: kindConfig.color }} + aria-label={`Kind: ${kind}`} + > + {kindConfig.icon} + </span> + <span className={styles.title}> + {memory.title ?? 
"Untitled"} + </span> + </div> + {isLinked && ( + <span + className={styles.linkBadge} + aria-label="Linked in graph" + > + 🔗 + </span> + )} + </div> + + <div className={styles.metadata}> + <div className={styles.metaRow}> + <span className={styles.metaLabel}>Kind:</span> + <span className={styles.metaValue}> + {kind.charAt(0).toUpperCase() + kind.slice(1)} + </span> + </div> + {memory.tags.length > 0 && ( + <div className={styles.metaRow}> + <span className={styles.metaLabel}>Tags:</span> + <div className={styles.tags}> + {memory.tags.slice(0, 3).map((tag) => ( + <span key={tag} className={styles.tagDot} title={tag}> + ● + </span> + ))} + {memory.tags.length > 3 && ( + <span className={styles.tagMore}> + +{memory.tags.length - 3} + </span> + )} + </div> + </div> + )} + </div> + </button> + ); + })} + </div> + </div> + ); +} diff --git a/refact-agent/gui/src/features/Knowledge/index.ts b/refact-agent/gui/src/features/Knowledge/index.ts new file mode 100644 index 000000000..fa0f685c3 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/index.ts @@ -0,0 +1,16 @@ +export { KnowledgeWorkspace } from "./KnowledgeWorkspace"; +export { KnowledgeGraphView } from "./KnowledgeGraphView"; +export { MemoryListView } from "./MemoryListView"; +export { MemoryDetailsEditor } from "./MemoryDetailsEditor"; +export { useKnowledgeGraphTheme } from "./useKnowledgeGraphTheme"; +export { + knowledgeSlice, + setVecDbStatus, + setMemory, + deleteMemory, + clearMemory, + selectVecDbStatus, + selectMemories, + selectKnowledgeIsLoaded, +} from "./knowledgeSlice"; +export type { KnowledgeState } from "./knowledgeSlice"; diff --git a/refact-agent/gui/src/features/Knowledge/knowledgeGraphSubgraph.ts b/refact-agent/gui/src/features/Knowledge/knowledgeGraphSubgraph.ts new file mode 100644 index 000000000..f0d74b893 --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/knowledgeGraphSubgraph.ts @@ -0,0 +1,71 @@ +import type { + KnowledgeGraphNode, + KnowledgeGraphEdge, +} from "../../services/refact/types"; + +export type SubgraphParams = { + seedId: string; + depth: 1 | 2; + nodes: KnowledgeGraphNode[]; + edges: KnowledgeGraphEdge[]; + includeNode: (node: KnowledgeGraphNode) => boolean; +}; + +export type SubgraphResult = { + nodeIds: Set<string>; + edgeIds: Set<string>; +}; + +export function makeEdgeId( + source: string, + target: string, + edgeType: string, +): string { + return JSON.stringify([source, target, edgeType]); +} + +export function buildSubgraph(params: SubgraphParams): SubgraphResult { + const { seedId, depth, nodes, edges, includeNode } = params; + + const nodeIndex = new Map(nodes.map((n) => [n.id, n])); + const seedNode = nodeIndex.get(seedId); + + if (!seedNode) { + return { nodeIds: new Set(), edgeIds: new Set() }; + } + + const nodeIds = new Set<string>(); + const queue: { id: string; d: number }[] = [{ id: seedId, d: 0 }]; + + while (queue.length > 0) { + // eslint-disable-next-line @typescript-eslint/no-non-null-assertion + const { id, d } = queue.shift()!; + + if (nodeIds.has(id)) continue; + + const node = nodeIndex.get(id); + if (!node || !includeNode(node)) continue; + + nodeIds.add(id); + + if (d < depth) { + for (const edge of edges) { + if (edge.source === id && !nodeIds.has(edge.target)) { + queue.push({ id: edge.target, d: d + 1 }); + } + if (edge.target === id && !nodeIds.has(edge.source)) { + queue.push({ id: edge.source, d: d + 1 }); + } + } + } + } + + const edgeIds = new Set<string>(); + for (const edge of edges) { + if (nodeIds.has(edge.source) && 
nodeIds.has(edge.target)) { + edgeIds.add(makeEdgeId(edge.source, edge.target, edge.edge_type)); + } + } + + return { nodeIds, edgeIds }; +} diff --git a/refact-agent/gui/src/features/Knowledge/useKnowledgeGraphTheme.ts b/refact-agent/gui/src/features/Knowledge/useKnowledgeGraphTheme.ts new file mode 100644 index 000000000..07d650d9e --- /dev/null +++ b/refact-agent/gui/src/features/Knowledge/useKnowledgeGraphTheme.ts @@ -0,0 +1,28 @@ +export function useKnowledgeGraphTheme() { + const isDark = + document.documentElement.getAttribute("data-appearance") === "dark" || + document.documentElement.classList.contains("dark"); + + const colors = { + surface: "var(--color-surface)", + panel: "var(--color-panel)", + accent: "var(--accent-9)", + gray: "var(--gray-9)", + + kind: { + code: "#3b82f6", + decision: "#8b5cf6", + trajectory: "#6b7280", + preference: "#10b981", + other: "#6b7280", + }, + + status: { + active: "var(--accent-9)", + deprecated: "#ef4444", + archived: "#9ca3af", + }, + }; + + return { colors, isDark }; +} diff --git a/refact-agent/gui/src/features/Login/LoginPage.stories.tsx b/refact-agent/gui/src/features/Login/LoginPage.stories.tsx index f8aa698e8..fa43a9916 100644 --- a/refact-agent/gui/src/features/Login/LoginPage.stories.tsx +++ b/refact-agent/gui/src/features/Login/LoginPage.stories.tsx @@ -19,9 +19,6 @@ const App = () => { addressURL: "Refact", themeProps: { appearance: "dark", accentColor: "gray" }, }, - tour: { - type: "finished", - }, }); return ( <Provider store={store}> diff --git a/refact-agent/gui/src/features/Login/LoginPage.tsx b/refact-agent/gui/src/features/Login/LoginPage.tsx index 76bd35255..c2fc2e529 100644 --- a/refact-agent/gui/src/features/Login/LoginPage.tsx +++ b/refact-agent/gui/src/features/Login/LoginPage.tsx @@ -3,21 +3,63 @@ import { Flex, Box, Button, + Card, + Grid, Text, Separator, TextField, Container, Heading, + Badge, } from "@radix-ui/themes"; -import { GitHubLogoIcon } from "@radix-ui/react-icons"; +import { GitHubLogoIcon, CheckCircledIcon } from "@radix-ui/react-icons"; import { GoogleIcon } from "../../images/GoogleIcon"; import { Accordion } from "../../components/Accordion"; -import { useLogin, useEmailLogin, useEventsBusForIDE } from "../../hooks"; +import { ScrollArea } from "../../components/ScrollArea"; +import { + useAppDispatch, + useAppSelector, + useEmailLogin, + useLogin, + useLogout, + useGetUser, + useGetConfiguredProvidersQuery, +} from "../../hooks"; +import { ProviderCard } from "../Providers/ProviderCard"; +import { ProviderPreview } from "../Providers/ProviderPreview"; +import type { ProviderListItem } from "../../services/refact"; +import { useGetConfiguredProvidersView } from "../Providers/ProvidersView/useConfiguredProvidersView"; +import { newChatAction } from "../Chat"; +import { push } from "../Pages/pagesSlice"; +import { + selectApiKey, + selectAddressURL, + setApiKey, + setAddressURL, +} from "../Config/configSlice"; +import { hasAnyUsableActiveProvider } from "./providerAccess"; export const LoginPage: React.FC = () => { const { loginWithProvider, polling, cancelLogin } = useLogin(); - const { setupHost } = useEventsBusForIDE(); const { emailLogin, emailLoginResult, emailLoginAbort } = useEmailLogin(); + const dispatch = useAppDispatch(); + const logout = useLogout(); + const user = useGetUser(); + + const apiKey = useAppSelector(selectApiKey); + const addressURL = useAppSelector(selectAddressURL); + + const isRefactCloudLoggedIn = + (addressURL ?? "").trim().toLowerCase() === "refact" && + (apiKey ?? 
"").trim().length > 0; + + const providersQuery = useGetConfiguredProvidersQuery(); + const configuredProviders = providersQuery.data?.providers ?? []; + const { sortedConfiguredProviders } = useGetConfiguredProvidersView({ + configuredProviders, + }); + const [currentProvider, setCurrentProvider] = + React.useState<ProviderListItem | null>(null); const emailIsLoading = React.useMemo(() => { if ( @@ -47,160 +89,222 @@ export const LoginPage: React.FC = () => { } }, [cancelLogin, emailLoginAbort]); + const hasAnyActiveProvider = React.useMemo(() => { + return hasAnyUsableActiveProvider({ + providers: sortedConfiguredProviders, + addressURL, + apiKey, + }); + }, [sortedConfiguredProviders, addressURL, apiKey]); + + const onContinue = useCallback(() => { + // BYOK path: mark as “logged in” locally without triggering SmallCloud user fetch. + if (!addressURL || addressURL.trim().length === 0) { + dispatch(setAddressURL("BYOK")); + } + if (!apiKey || apiKey.trim().length === 0) { + dispatch(setApiKey("byok")); + } + + dispatch(push({ name: "history" })); + dispatch(newChatAction()); + dispatch(push({ name: "chat" })); + }, [addressURL, apiKey, dispatch]); + return ( - <Container> - <Heading align="center" as="h2" size="6" my="6"> - Login to Refact.ai - </Heading> - <Accordion.Root - type="single" - defaultValue={"cloud"} - disabled={isLoading} - collapsible - > - <Accordion.Item value="cloud"> - <Accordion.Trigger>Refact Cloud</Accordion.Trigger> - <Accordion.Content> - <Box> - <Text size="2"> - <ul> - <li> - Chat with your codebase powered by top models (e.g. Claude - 3.7 Sonnet, OpenAI GPT-4o and o3-mini). - </li> - <li>Unlimited Code Completions (powered by Qwen2.5).</li> - <li>Codebase-aware vector database (RAG).</li> - <li> - Agentic features: browser use, database connect, debugger, - shell commands, etc. - </li> - </ul> - </Text> - </Box> - <Separator size="4" my="4" /> - <Flex direction="column" gap="3" align="center"> - <Button - onClick={() => { - onCancel(); - loginWithProvider("google"); - }} - disabled={isLoading} - > - <GoogleIcon width="15" height="15" /> Continue with Google - </Button> - <Button - onClick={() => { - onCancel(); - loginWithProvider("github"); - }} - disabled={isLoading} - > - <GitHubLogoIcon width="15" height="15" /> Continue with GitHub - </Button> + <ScrollArea scrollbars="vertical" fullHeight> + <Container> + <Heading align="center" as="h2" size="6" my="6"> + Login to Refact.ai + </Heading> - <Text>or</Text> - - <Flex asChild direction="column" gap="3"> - <form - onSubmit={(event) => { - event.preventDefault(); - if (isLoading) return; - const formData = new FormData(event.currentTarget); - const email = formData.get("email"); - if (typeof email === "string") { - emailLogin(email); - } - }} - > - <TextField.Root - placeholder="Email Address" - type="email" - name="email" - required - disabled={isLoading} - /> + <Accordion.Root + type="single" + defaultValue={"cloud"} + disabled={isLoading} + collapsible + > + <Accordion.Item value="cloud"> + <Accordion.Trigger>Refact Cloud</Accordion.Trigger> + <Accordion.Content> + {isRefactCloudLoggedIn ? 
( + <Flex direction="column" gap="3" align="center"> + <Flex align="center" gap="2"> + <CheckCircledIcon + width="16" + height="16" + color="var(--green-9)" + /> + <Text size="2" weight="medium"> + Logged in to Refact Cloud + </Text> + </Flex> + {user.data && ( + <Badge size="2" variant="soft"> + {user.data.account} + </Badge> + )} <Button - type="submit" - loading={emailIsLoading} - disabled={isLoading} + variant="outline" + color="red" + size="1" + onClick={logout} > - Send magic link - </Button>{" "} - {isLoading && <Button onClick={onCancel}>Cancel</Button>} - <Text size="1" align="center"> - We will send you a one-time login link by email. - </Text> - </form> - </Flex> - </Flex> - </Accordion.Content> - </Accordion.Item> - <Accordion.Item value="private"> - <Accordion.Trigger>Private Server</Accordion.Trigger> - <Accordion.Content> - <Box> - <Text size="2"> - <ul> - <li> - User your own Refact server (Enterprise or self-hosted). - </li> - <li>Fine-tune code completions to your codebase</li> - <li>Keep all code and data under your control.</li> - </ul> + Log out + </Button> + </Flex> + ) : ( + <> + <Box> + <Text size="2"> + <ul> + <li> + Chat with your codebase powered by top models (e.g. + Claude 3.7 Sonnet, OpenAI GPT-4o and o3-mini). + </li> + <li> + Unlimited Code Completions (powered by Qwen2.5). + </li> + <li>Codebase-aware vector database (RAG).</li> + <li> + Agentic features: browser use, database connect, + debugger, shell commands, etc. + </li> + </ul> + </Text> + </Box> + <Separator size="4" my="4" /> + <Flex direction="column" gap="3" align="center"> + <Button + onClick={() => { + onCancel(); + loginWithProvider("google"); + }} + disabled={isLoading} + > + <GoogleIcon width="15" height="15" /> Continue with Google + </Button> + <Button + onClick={() => { + onCancel(); + loginWithProvider("github"); + }} + disabled={isLoading} + > + <GitHubLogoIcon width="15" height="15" /> Continue with + GitHub + </Button> + + <Text>or</Text> + + <Flex asChild direction="column" gap="3"> + <form + onSubmit={(event) => { + event.preventDefault(); + if (isLoading) return; + const formData = new FormData(event.currentTarget); + const email = formData.get("email"); + if (typeof email === "string") { + emailLogin(email); + } + }} + > + <TextField.Root + placeholder="Email Address" + type="email" + name="email" + required + disabled={isLoading} + /> + <Button + type="submit" + loading={emailIsLoading} + disabled={isLoading} + > + Send magic link + </Button>{" "} + {isLoading && ( + <Button onClick={onCancel}>Cancel</Button> + )} + <Text size="1" align="center"> + We will send you a one-time login link by email. + </Text> + </form> + </Flex> + </Flex> + </> + )} + </Accordion.Content> + </Accordion.Item> + </Accordion.Root> + + <Separator size="4" my="6" /> + + {!currentProvider && ( + <> + <Flex direction="column" gap="3"> + <Heading as="h3" size="4"> + Or bring your own provider + </Heading> + <Text size="2" color="gray"> + Configure one or more providers below, enable at least one + model, then Continue. 
</Text> - </Box> - <Separator size="4" my="4" /> - <Flex asChild direction="column" gap="3" mb="2"> - {/** TODO: handle these changes */} - <form - onSubmit={(event) => { - const formData = new FormData(event.currentTarget); - const endpoint = formData.get("endpoint"); - const apiKey = formData.get("api-key"); - if ( - apiKey && - typeof apiKey === "string" && - endpoint && - typeof endpoint === "string" - ) { - setupHost({ - type: "enterprise", - apiKey, - endpointAddress: endpoint, - }); - } else if (endpoint && typeof endpoint === "string") { - setupHost({ type: "self", endpointAddress: endpoint }); - } - // handle setUpHost - }} - > - <Box> - <Text as="label" htmlFor="endpoint"> - Endpoint - </Text> - <TextField.Root - type="url" - name="endpoint" - placeholder="http://x.x.x.x:8008/" - required - /> - </Box> + </Flex> - <Box> - <Text as="label" htmlFor="api-key"> - API Key (optional) - </Text> - <TextField.Root name="api-key" placeholder="your api key" /> - </Box> + <Box mt="4"> + <Grid columns={{ initial: "2", sm: "3" }} gap="3" width="100%"> + {sortedConfiguredProviders.map((provider) => ( + <ProviderCard + key={provider.name} + provider={provider} + setCurrentProvider={setCurrentProvider} + /> + ))} + </Grid> + </Box> + </> + )} - <Flex justify="end"> - <Button type="submit">Open in IDE</Button> - </Flex> - </form> + {currentProvider && ( + <Card mt="4" variant="surface" style={{ padding: "var(--space-4)" }}> + <Flex justify="between" align="center" mb="3" gap="3" wrap="wrap"> + <Heading as="h4" size="3"> + {currentProvider.display_name} + </Heading> + <Button + variant="outline" + onClick={() => setCurrentProvider(null)} + > + Back to providers + </Button> </Flex> - </Accordion.Content> - </Accordion.Item> - </Accordion.Root> - </Container> + <ProviderPreview + configuredProviders={sortedConfiguredProviders} + currentProvider={currentProvider} + handleSetCurrentProvider={setCurrentProvider} + /> + </Card> + )} + + <Flex justify="end" gap="3" mt="5" align="center" wrap="wrap"> + <Text size="2" color="gray"> + {providersQuery.isFetching + ? "Loading providers…" + : hasAnyActiveProvider + ? "Ready to start" + : "Enable at least one model to continue"} + </Text> + <Button + onClick={onContinue} + disabled={ + isLoading || providersQuery.isFetching || !hasAnyActiveProvider + } + > + Continue + </Button> + </Flex> + </Container> + </ScrollArea> ); }; diff --git a/refact-agent/gui/src/features/Login/providerAccess.ts b/refact-agent/gui/src/features/Login/providerAccess.ts new file mode 100644 index 000000000..89a7b125d --- /dev/null +++ b/refact-agent/gui/src/features/Login/providerAccess.ts @@ -0,0 +1,24 @@ +import type { ProviderListItem } from "../../services/refact"; + +export function hasAnyUsableActiveProvider({ + providers, + addressURL, + apiKey, +}: { + providers: ProviderListItem[]; + addressURL?: string; + apiKey?: string | null; +}): boolean { + return providers.some((provider) => { + if (provider.status !== "active") return false; + + if (provider.name !== "refact") { + return true; + } + + const normalizedAddress = (addressURL ?? "").trim().toLowerCase(); + const normalizedApiKey = (apiKey ?? 
"").trim(); + + return normalizedAddress === "refact" && normalizedApiKey.length > 0; + }); +} diff --git a/refact-agent/gui/src/features/Pages/pagesSlice.ts b/refact-agent/gui/src/features/Pages/pagesSlice.ts index a5d18e547..8d55cea94 100644 --- a/refact-agent/gui/src/features/Pages/pagesSlice.ts +++ b/refact-agent/gui/src/features/Pages/pagesSlice.ts @@ -1,13 +1,5 @@ import { createSlice, PayloadAction } from "@reduxjs/toolkit"; -export interface Welcome { - name: "welcome"; -} - -export interface TourEnd { - name: "tour end"; -} - export interface HistoryList { name: "history"; } @@ -42,6 +34,36 @@ export interface ProvidersPage { name: "providers page"; } +export interface TasksListPage { + name: "tasks list"; +} + +export interface TaskWorkspacePage { + name: "task workspace"; + taskId: string; +} + +export interface TaskAgentPage { + name: "task agent"; + taskId: string; + agentId: string; + chatId: string; +} + +export interface KnowledgeGraphPage { + name: "knowledge graph"; +} + +export interface CustomizationPage { + name: "customization"; + kind?: "modes" | "subagents" | "toolbox_commands" | "code_lens"; + configId?: string; +} + +export interface DefaultModelsPage { + name: "default models"; +} + export interface IntegrationsSetupPage { name: "integrations page"; projectPath?: string; @@ -53,8 +75,6 @@ export interface IntegrationsSetupPage { export type Page = | ChatPage - | Welcome - | TourEnd | HistoryList | FIMDebugPage | StatisticsPage @@ -62,7 +82,13 @@ export type Page = | ChatThreadHistoryPage | IntegrationsSetupPage | ProvidersPage - | LoginPage; + | LoginPage + | TasksListPage + | TaskWorkspacePage + | TaskAgentPage + | KnowledgeGraphPage + | CustomizationPage + | DefaultModelsPage; export function isIntegrationSetupPage( page: Page, @@ -100,14 +126,14 @@ export const pagesSlice = createSlice({ }); if (pageIndex === -1) { state.push(action.payload); - return state; + return; } - return state.slice(0, pageIndex + 1); + state.length = pageIndex + 1; }, change: (state, action: PayloadAction<Page>) => { - const last = state.slice(0, -1); - return last.concat(action.payload); + state.pop(); + state.push(action.payload); }, }, selectors: { diff --git a/refact-agent/gui/src/features/PatchesAndDiffsTracker/patchesAndDiffsTrackerSlice.ts b/refact-agent/gui/src/features/PatchesAndDiffsTracker/patchesAndDiffsTrackerSlice.ts index 78f52ea21..ca6153d31 100644 --- a/refact-agent/gui/src/features/PatchesAndDiffsTracker/patchesAndDiffsTrackerSlice.ts +++ b/refact-agent/gui/src/features/PatchesAndDiffsTracker/patchesAndDiffsTrackerSlice.ts @@ -1,8 +1,8 @@ import { createSelector, createSlice, PayloadAction } from "@reduxjs/toolkit"; -import { chatAskQuestionThunk, chatResponse } from "../Chat"; -import { isAssistantMessage, isDiffResponse } from "../../events"; -import { parseOrElse, partition } from "../../utils"; +import { applyChatEvent } from "../Chat/Thread/actions"; +import { partition } from "../../utils"; import { RootState } from "../../app/store"; +import { isDiffMessage } from "../../services/refact"; export type PatchMeta = { chatId: string; @@ -46,42 +46,25 @@ export const patchesAndDiffsTrackerSlice = createSlice({ }, extraReducers: (builder) => { - builder.addCase(chatAskQuestionThunk.pending, (state, action) => { - if (action.meta.arg.messages.length === 0) return state; - const { messages, chatId } = action.meta.arg; - const lastMessage = messages[messages.length - 1]; - if (!isAssistantMessage(lastMessage)) return state; - const toolCalls = lastMessage.tool_calls; - 
if (!toolCalls) return state; - const patches = toolCalls.reduce<PatchMeta[]>((acc, toolCall) => { - if (toolCall.id === undefined) return acc; - if (toolCall.function.name !== "patch") return acc; - const filePath = pathFromArgString(toolCall.function.arguments); - if (!filePath) return acc; - return [ - ...acc, - { - chatId, - toolCallId: toolCall.id, - filePath, - started: false, - completed: false, - }, - ]; - }, []); - state.patches.push(...patches); - }); - - builder.addCase(chatResponse, (state, action) => { - if (!isDiffResponse(action.payload)) return state; - const { id, tool_call_id } = action.payload; - const next = state.patches.map((patchMeta) => { - if (patchMeta.chatId !== id) return patchMeta; - if (patchMeta.toolCallId !== tool_call_id) return patchMeta; - return { ...patchMeta, completed: true }; - }); - - state.patches = next; + // Listen to SSE events for diff messages + builder.addCase(applyChatEvent, (state, action) => { + const { chat_id, ...event } = action.payload; + // Check for message_added events with diff role + if (event.type === "message_added") { + const msg = event.message; + if (isDiffMessage(msg)) { + const tool_call_id = + "tool_call_id" in msg ? msg.tool_call_id : undefined; + if (tool_call_id) { + const next = state.patches.map((patchMeta) => { + if (patchMeta.chatId !== chat_id) return patchMeta; + if (patchMeta.toolCallId !== tool_call_id) return patchMeta; + return { ...patchMeta, completed: true }; + }); + state.patches = next; + } + } + } }); }, @@ -128,17 +111,3 @@ export const selectCompletedPatchesFilePaths = createSelector( export const { setStartedByFilePaths, removePatchMetaByFileNameIfCompleted } = patchesAndDiffsTrackerSlice.actions; - -const pathFromArgString = (argString: string) => { - const args = parseOrElse<Record<string, unknown> | null>(argString, null); - if ( - args && - typeof args === "object" && - "path" in args && - typeof args.path === "string" - ) { - return args.path; - } else { - return null; - } -}; diff --git a/refact-agent/gui/src/features/Providers/ProviderCard/ProviderCard.tsx b/refact-agent/gui/src/features/Providers/ProviderCard/ProviderCard.tsx index 8856ea871..476a98992 100644 --- a/refact-agent/gui/src/features/Providers/ProviderCard/ProviderCard.tsx +++ b/refact-agent/gui/src/features/Providers/ProviderCard/ProviderCard.tsx @@ -1,45 +1,50 @@ import React from "react"; -import { Card, Flex, Heading } from "@radix-ui/themes"; +import { Badge, Card, Flex, Heading } from "@radix-ui/themes"; -import { OnOffSwitch } from "../../../components/OnOffSwitch/OnOffSwitch"; import { iconsMap } from "../icons/iconsMap"; -import type { ConfiguredProvidersResponse } from "../../../services/refact"; +import type { + ProviderListItem, + ProviderStatus, +} from "../../../services/refact"; import { getProviderName } from "../getProviderName"; -import { useProviderCard } from "./useProviderCard"; import styles from "./ProviderCard.module.css"; -import { useUpdateProvider } from "../useUpdateProvider"; -import classNames from "classnames"; export type ProviderCardProps = { - provider: ConfiguredProvidersResponse["providers"][number]; - setCurrentProvider: ( - provider: ConfiguredProvidersResponse["providers"][number], - ) => void; + provider: ProviderListItem; + setCurrentProvider: (provider: ProviderListItem) => void; +}; + +const StatusDot: React.FC<{ status: ProviderStatus }> = ({ status }) => { + switch (status) { + case "active": + return ( + <Badge color="green" size="1" variant="soft"> + ● + </Badge> + ); + case 
"configured": + return ( + <Badge color="orange" size="1" variant="soft"> + ● + </Badge> + ); + default: + return null; + } }; export const ProviderCard: React.FC<ProviderCardProps> = ({ provider, setCurrentProvider, }) => { - const { isUpdatingEnabledState } = useUpdateProvider({ - provider, - }); - - const { handleClickOnProvider, handleSwitchClick } = useProviderCard({ - provider, - setCurrentProvider, - }); - return ( <Card size="2" - onClick={handleClickOnProvider} - className={classNames(styles.providerCard, { - [styles.providerCardDisabled]: isUpdatingEnabledState, - })} + onClick={() => setCurrentProvider(provider)} + className={styles.providerCard} > <Flex align="center" justify="between"> <Flex gap="3" align="center"> @@ -48,16 +53,15 @@ export const ProviderCard: React.FC<ProviderCardProps> = ({ {getProviderName(provider)} </Heading> </Flex> - {!provider.readonly && ( - <Flex align="center" gap="2"> - <OnOffSwitch - isEnabled={provider.enabled} - isUpdating={isUpdatingEnabledState} - isUnavailable={isUpdatingEnabledState} - handleClick={handleSwitchClick} - /> - </Flex> - )} + <Flex align="center" gap="2"> + {provider.model_count > 0 && ( + <Badge color="gray" size="1" variant="soft"> + {provider.model_count} model + {provider.model_count !== 1 ? "s" : ""} + </Badge> + )} + <StatusDot status={provider.status} /> + </Flex> </Flex> </Card> ); diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/FormFields.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/FormFields.tsx index 4df26f5c3..67732d06d 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/FormFields.tsx +++ b/refact-agent/gui/src/features/Providers/ProviderForm/FormFields.tsx @@ -4,14 +4,14 @@ import classNames from "classnames"; import { Flex, Select, TextField } from "@radix-ui/themes"; import { toPascalCase } from "../../../utils/toPascalCase"; -import type { Provider } from "../../../services/refact"; +import type { ProviderFormValues } from "./useProviderForm"; import styles from "./ProviderForm.module.css"; export type FormFieldsProps = { - providerData: Provider; + providerData: ProviderFormValues; fields: Record<string, string | boolean>; - onChange: (updatedProviderData: Provider) => void; + onChange: (updatedProviderData: ProviderFormValues) => void; }; export const FormFields: FC<FormFieldsProps> = ({ @@ -20,16 +20,16 @@ export const FormFields: FC<FormFieldsProps> = ({ onChange, }) => { return Object.entries(fields).map(([key, value], idx) => { - if (key === "endpoint_style" && providerData.name === "custom") { - const availableOptions: Provider["endpoint_style"][] = ["openai", "hf"]; + if (key === "endpoint_style") { + const availableOptions = ["openai", "hf"]; const displayValues = ["OpenAI", "HuggingFace"]; return ( <Flex key={`${key}_${idx}`} direction="column"> {toPascalCase(key)} <Select.Root defaultValue={value.toString()} - onValueChange={(value: Provider["endpoint_style"]) => - onChange({ ...providerData, endpoint_style: value }) + onValueChange={(newValue) => + onChange({ ...providerData, endpoint_style: newValue }) } disabled={providerData.readonly} > @@ -46,15 +46,6 @@ export const FormFields: FC<FormFieldsProps> = ({ ); } - if (key === "endpoint_style") return null; - - if ( - !providerData.supports_completion && - (key === "completion_default_model" || key === "completion_endpoint") - ) { - return null; - } - return ( <Flex key={`${key}_${idx}`} direction="column" gap="1"> <label htmlFor={key}>{toPascalCase(key)}</label> diff --git 
a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderForm.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderForm.tsx index 86e4a1b5d..566b3d554 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderForm.tsx +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderForm.tsx @@ -1,123 +1,165 @@ import React from "react"; -import classNames from "classnames"; -import { Button, Flex, Separator, Switch } from "@radix-ui/themes"; +import { Badge, Button, Flex, Separator, Text } from "@radix-ui/themes"; -import { FormFields } from "./FormFields"; +import { SchemaField } from "./SchemaField"; +import { ProviderOAuth } from "./ProviderOAuth"; import { Spinner } from "../../../components/Spinner"; import { useProviderForm } from "./useProviderForm"; -import type { Provider, SimplifiedProvider } from "../../../services/refact"; - -import { toPascalCase } from "../../../utils/toPascalCase"; -import { aggregateProviderFields } from "./utils"; +import type { + ProviderListItem, + ProviderStatus, +} from "../../../services/refact"; import styles from "./ProviderForm.module.css"; import { ProviderModelsList } from "./ProviderModelsList/ProviderModelsList"; +import { useGetOpenRouterHealthQuery } from "../../../services/refact"; + +const SETTINGS_HIDDEN_PROVIDERS = ["refact", "refact_self_hosted"]; export type ProviderFormProps = { - currentProvider: SimplifiedProvider< - "name" | "enabled" | "readonly" | "supports_completion" - >; - isProviderConfigured: boolean; - isSaving: boolean; - handleDiscardChanges: () => void; - handleSaveChanges: (updatedProviderData: Provider) => void; + currentProvider: ProviderListItem; +}; + +export type { ProviderListItem }; + +const StatusBadge: React.FC<{ status: ProviderStatus }> = ({ status }) => { + switch (status) { + case "active": + return ( + <Badge color="green" size="1"> + Active + </Badge> + ); + case "configured": + return ( + <Badge color="orange" size="1"> + Configured + </Badge> + ); + case "not_configured": + return ( + <Badge color="gray" size="1"> + Not configured + </Badge> + ); + default: + return null; + } }; export const ProviderForm: React.FC<ProviderFormProps> = ({ currentProvider, - isProviderConfigured, - isSaving, - handleDiscardChanges, - handleSaveChanges, }) => { + const { data: openRouterHealth } = useGetOpenRouterHealthQuery(undefined, { + skip: currentProvider.name !== "openrouter", + }); const { areShowingExtraFields, formValues, - handleFormValuesChange, + parsedSchema, + importantFields, + extraFields, isProviderLoadedSuccessfully, setAreShowingExtraFields, - shouldSaveButtonBeDisabled, + handleFieldSave, + detailedProvider, } = useProviderForm({ providerName: currentProvider.name }); - if (!isProviderLoadedSuccessfully || !formValues) return <Spinner spinning />; + if (!isProviderLoadedSuccessfully || !formValues || !parsedSchema) { + return <Spinner spinning />; + } - const { extraFields, importantFields } = aggregateProviderFields(formValues); + const hideSettings = SETTINGS_HIDDEN_PROVIDERS.includes(currentProvider.name); + const hasOAuth = parsedSchema.oauth?.supported === true; + const status: ProviderStatus = + detailedProvider?.status ?? currentProvider.status; + const hasCredentials = + detailedProvider?.has_credentials ?? 
currentProvider.has_credentials; + const isReadonly = formValues.readonly; return ( - <Flex - direction="column" - width="100%" - height="100%" - mt="2" - justify="between" - > - <Flex direction="column" width="100%" gap="2"> - <Flex align="center" justify="between" gap="3" mb="2"> - <label htmlFor={"enabled"}>{toPascalCase("enabled")}</label> - <Switch - id={"enabled"} - checked={Boolean(formValues.enabled)} - value={formValues.enabled ? "on" : "off"} - disabled={formValues.readonly} - className={classNames({ - [styles.disabledSwitch]: formValues.readonly, - })} - onCheckedChange={(checked) => - handleFormValuesChange({ ...formValues, ["enabled"]: checked }) - } - /> - </Flex> - <Separator size="4" mb="2" /> - <Flex direction="column" gap="2"> - <FormFields - providerData={formValues} - fields={importantFields} - onChange={handleFormValuesChange} - /> - </Flex> - - {areShowingExtraFields && ( - <Flex direction="column" gap="2" mt="4"> - <FormFields - providerData={formValues} - fields={extraFields} - onChange={handleFormValuesChange} - /> - </Flex> + <Flex direction="column" width="100%" height="100%" mt="2" gap="3"> + <Flex align="center" gap="2"> + <StatusBadge status={status} /> + {currentProvider.name === "openrouter" && openRouterHealth && ( + <Badge color={openRouterHealth.ok ? "green" : "red"} size="1"> + {openRouterHealth.ok ? "Key OK" : "Key Error"} + </Badge> )} - <Flex my="2" align="center" justify="center"> - <Button - className={classNames(styles.button, styles.extraButton)} - variant="ghost" - color="gray" - onClick={() => setAreShowingExtraFields((prev) => !prev)} - > - {areShowingExtraFields ? "Hide" : "Show"} advanced fields - </Button> - </Flex> - {isProviderConfigured && ( - <ProviderModelsList provider={currentProvider} /> + {parsedSchema.description && ( + <Text size="1" color="gray" style={{ flex: 1 }}> + {parsedSchema.description.trim().split("\n")[0]} + </Text> )} </Flex> - <Flex gap="2" align="center" mt="4"> - <Button - className={styles.button} - variant="outline" - onClick={handleDiscardChanges} - > - Cancel - </Button> - <Button - className={styles.button} - variant="solid" - disabled={isSaving || shouldSaveButtonBeDisabled} - title="Save Provider configuration" - onClick={() => handleSaveChanges(formValues)} - > - {isSaving ? "Saving..." : "Save"} - </Button> - </Flex> + + {!hideSettings && ( + <Flex direction="column" width="100%" gap="3"> + {hasOAuth && ( + <> + <ProviderOAuth + providerName={currentProvider.name} + oauthConnected={Boolean( + "oauth_connected" in formValues && formValues.oauth_connected, + )} + authStatus={ + "auth_status" in formValues + ? String(formValues.auth_status) + : "" + } + /> + {importantFields.length > 0 && <Separator size="4" />} + </> + )} + + <Flex direction="column" gap="3"> + {importantFields.map((field) => ( + <SchemaField + key={field.key} + field={field} + value={formValues[field.key]} + disabled={isReadonly} + onSave={handleFieldSave} + /> + ))} + </Flex> + + {extraFields.length > 0 && ( + <> + <Flex align="center" justify="center"> + <Button + className={styles.extraButton} + variant="ghost" + color="gray" + size="1" + onClick={() => setAreShowingExtraFields((prev) => !prev)} + > + {areShowingExtraFields ? 
"Hide" : "Show"} advanced fields + </Button> + </Flex> + + {areShowingExtraFields && ( + <Flex direction="column" gap="3"> + {extraFields.map((field) => ( + <SchemaField + key={field.key} + field={field} + value={formValues[field.key]} + disabled={isReadonly} + onSave={handleFieldSave} + /> + ))} + </Flex> + )} + </> + )} + </Flex> + )} + + {(hasCredentials || hideSettings) && ( + <ProviderModelsList provider={currentProvider} /> + )} </Flex> ); }; diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/AddCustomModelModal.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/AddCustomModelModal.tsx new file mode 100644 index 000000000..2aa8653d8 --- /dev/null +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/AddCustomModelModal.tsx @@ -0,0 +1,187 @@ +import { type FC, useState, useCallback } from "react"; +import { + Button, + Checkbox, + Dialog, + Flex, + Text, + TextField, +} from "@radix-ui/themes"; + +import { + useAddCustomModelMutation, + type AddCustomModelRequest, +} from "../../../../services/refact"; + +export type AddCustomModelModalProps = { + providerName: string; + isOpen: boolean; + onClose: () => void; +}; + +export const AddCustomModelModal: FC<AddCustomModelModalProps> = ({ + providerName, + isOpen, + onClose, +}) => { + const [addCustomModel, { isLoading }] = useAddCustomModelMutation(); + + const [modelId, setModelId] = useState(""); + const [nCtx, setNCtx] = useState("4096"); + const [supportsTools, setSupportsTools] = useState(false); + const [supportsMultimodality, setSupportsMultimodality] = useState(false); + const [supportsThinkingBudget, setSupportsThinkingBudget] = useState(false); + const [tokenizer, setTokenizer] = useState(""); + + const resetForm = useCallback(() => { + setModelId(""); + setNCtx("4096"); + setSupportsTools(false); + setSupportsMultimodality(false); + setSupportsThinkingBudget(false); + setTokenizer(""); + }, []); + + const handleSubmit = useCallback(async () => { + const model: AddCustomModelRequest = { + id: modelId.trim(), + n_ctx: parseInt(nCtx, 10) || 4096, + supports_tools: supportsTools, + supports_multimodality: supportsMultimodality, + supports_thinking_budget: supportsThinkingBudget, + tokenizer: tokenizer.trim() || null, + }; + + try { + await addCustomModel({ providerName, model }).unwrap(); + resetForm(); + onClose(); + } catch (e) { + // eslint-disable-next-line no-console + console.error("Failed to add custom model:", e); + } + }, [ + addCustomModel, + providerName, + modelId, + nCtx, + supportsTools, + supportsMultimodality, + supportsThinkingBudget, + tokenizer, + resetForm, + onClose, + ]); + + const isValid = modelId.trim().length > 0 && parseInt(nCtx, 10) > 0; + + return ( + <Dialog.Root open={isOpen} onOpenChange={(open) => !open && onClose()}> + <Dialog.Content style={{ maxWidth: 450 }}> + <Dialog.Title>Add Custom Model</Dialog.Title> + <Dialog.Description size="2" mb="4"> + Define a custom model for {providerName}. You&apos;ll need to specify + its capabilities manually. 
+ </Dialog.Description> + + <Flex direction="column" gap="3"> + <Flex direction="column" gap="1"> + <Text as="label" size="2" weight="medium"> + Model ID * + </Text> + <TextField.Root + placeholder="e.g., my-custom-model" + value={modelId} + onChange={(e) => setModelId(e.target.value)} + /> + </Flex> + + <Flex direction="column" gap="1"> + <Text as="label" size="2" weight="medium"> + Context Length * + </Text> + <TextField.Root + type="number" + placeholder="4096" + value={nCtx} + onChange={(e) => setNCtx(e.target.value)} + /> + </Flex> + + <Flex direction="column" gap="2"> + <Text as="label" size="2" weight="medium"> + Capabilities + </Text> + + <Flex align="center" gap="2"> + <Checkbox + id="supports_tools" + checked={supportsTools} + onCheckedChange={(checked) => + setSupportsTools(checked === true) + } + /> + <Text as="label" htmlFor="supports_tools" size="2"> + Supports Tools (function calling) + </Text> + </Flex> + + <Flex align="center" gap="2"> + <Checkbox + id="supports_multimodality" + checked={supportsMultimodality} + onCheckedChange={(checked) => + setSupportsMultimodality(checked === true) + } + /> + <Text as="label" htmlFor="supports_multimodality" size="2"> + Supports Images/Vision + </Text> + </Flex> + + <Flex align="center" gap="2"> + <Checkbox + id="supports_thinking_budget" + checked={supportsThinkingBudget} + onCheckedChange={(checked) => + setSupportsThinkingBudget(checked === true) + } + /> + <Text as="label" htmlFor="supports_thinking_budget" size="2"> + Supports Thinking Budget + </Text> + </Flex> + </Flex> + + <Flex direction="column" gap="1"> + <Text as="label" size="2" weight="medium"> + Tokenizer (optional) + </Text> + <TextField.Root + placeholder="hf://Xenova/claude-tokenizer" + value={tokenizer} + onChange={(e) => setTokenizer(e.target.value)} + /> + <Text as="span" size="1" color="gray"> + HuggingFace tokenizer path for accurate token counting + </Text> + </Flex> + </Flex> + + <Flex gap="3" mt="4" justify="end"> + <Dialog.Close> + <Button variant="soft" color="gray"> + Cancel + </Button> + </Dialog.Close> + <Button + onClick={() => void handleSubmit()} + disabled={!isValid || isLoading} + > + {isLoading ? "Adding..." 
: "Add Model"} + </Button> + </Flex> + </Dialog.Content> + </Dialog.Root> + ); +}; diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/AvailableModelCard.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/AvailableModelCard.tsx new file mode 100644 index 000000000..a524af0d2 --- /dev/null +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/AvailableModelCard.tsx @@ -0,0 +1,489 @@ +import { type FC, useCallback, useEffect, useMemo, useState } from "react"; +import classNames from "classnames"; +import { + Badge, + Card, + Flex, + IconButton, + Button, + Switch, + Text, + Tooltip, +} from "@radix-ui/themes"; +import { TrashIcon } from "@radix-ui/react-icons"; +import * as RadixCollapsible from "@radix-ui/react-collapsible"; + +import type { AvailableModel } from "../../../../services/refact"; +import { + useToggleModelMutation, + useSetModelProviderMutation, + useRemoveCustomModelMutation, + useGetOpenRouterModelEndpointsQuery, +} from "../../../../services/refact"; + +import styles from "./ModelCard.module.css"; + +export type AvailableModelCardProps = { + model: AvailableModel; + providerName: string; + isReadonlyProvider: boolean; +}; + +/** + * Card component that displays an available model with enable/disable toggle + */ +export const AvailableModelCard: FC<AvailableModelCardProps> = ({ + model, + providerName, + isReadonlyProvider, +}) => { + const [toggleModel, { isLoading: isToggling }] = useToggleModelMutation(); + const [setModelProvider, { isLoading: isSettingProvider }] = + useSetModelProviderMutation(); + const [removeCustomModel, { isLoading: isRemoving }] = + useRemoveCustomModelMutation(); + const [optimisticEnabled, setOptimisticEnabled] = useState(model.enabled); + const [optimisticSelectedProvider, setOptimisticSelectedProvider] = useState( + model.selected_provider ?? "", + ); + const [detailsOpen, setDetailsOpen] = useState(false); + + useEffect(() => { + setOptimisticEnabled(model.enabled); + }, [model.enabled]); + + useEffect(() => { + setOptimisticSelectedProvider(model.selected_provider ?? ""); + }, [model.selected_provider]); + + const isLoading = isToggling || isRemoving || isSettingProvider; + + const providerVariants = useMemo(() => { + if (!model.provider_variants?.length) return []; + return [...model.provider_variants].sort((a, b) => + a.id.localeCompare(b.id), + ); + }, [model.provider_variants]); + + const availableProviders = useMemo(() => { + if (!model.available_providers?.length) return []; + return [...model.available_providers].sort((a, b) => a.localeCompare(b)); + }, [model.available_providers]); + + const shouldFetchEndpoints = + providerName === "openrouter" && + detailsOpen && + providerVariants.length === 0 && + availableProviders.length === 0; + + const { data: endpointsData } = useGetOpenRouterModelEndpointsQuery( + { providerName, modelId: model.id }, + { skip: !shouldFetchEndpoints }, + ); + + const resolvedProviderVariants = + providerVariants.length > 0 + ? providerVariants + : endpointsData?.provider_variants ?? []; + const resolvedAvailableProviders = + availableProviders.length > 0 + ? availableProviders + : endpointsData?.available_providers ?? 
[]; + + const hasProviderRouting = + providerName === "openrouter" || + resolvedProviderVariants.length > 0 || + resolvedAvailableProviders.length > 0 || + Boolean(model.selected_provider); + + const handleToggle = useCallback( + async (checked: boolean) => { + setOptimisticEnabled(checked); + try { + await toggleModel({ + providerName, + modelId: model.id, + enabled: checked, + }).unwrap(); + } catch { + // Revert on error + setOptimisticEnabled(!checked); + } + }, + [toggleModel, providerName, model.id], + ); + + const handleRemove = useCallback(async () => { + if (!model.is_custom) return; + try { + await removeCustomModel({ + providerName, + modelId: model.id, + }).unwrap(); + } catch (e) { + // eslint-disable-next-line no-console + console.error("Failed to remove custom model:", e); + } + }, [removeCustomModel, providerName, model.id, model.is_custom]); + + const handleProviderSelect = useCallback( + async (provider: string) => { + const normalized = provider === "" ? null : provider; + const previous = optimisticSelectedProvider; + setOptimisticSelectedProvider(provider); + try { + await setModelProvider({ + providerName, + modelId: model.id, + selectedProvider: normalized, + }).unwrap(); + if (!optimisticEnabled) { + setOptimisticEnabled(true); + try { + await toggleModel({ + providerName, + modelId: model.id, + enabled: true, + }).unwrap(); + } catch { + setOptimisticEnabled(false); + } + } + } catch { + setOptimisticSelectedProvider(previous); + } + }, + [ + model.id, + optimisticEnabled, + optimisticSelectedProvider, + providerName, + setModelProvider, + toggleModel, + ], + ); + + // Format context size for display + const formatContextSize = (n_ctx: number) => { + if (n_ctx >= 1000000) return `${(n_ctx / 1000000).toFixed(1)}M`; + if (n_ctx >= 1000) return `${Math.round(n_ctx / 1000)}K`; + return `${n_ctx}`; + }; + + const formatPrice = (price?: number | null) => + typeof price === "number" ? `$${price.toFixed(2)}` : "–"; + + const renderProviderRow = ( + variant: (typeof resolvedProviderVariants)[number], + ) => { + const isSelected = optimisticSelectedProvider === variant.id; + return ( + <div + key={variant.id} + className={classNames(styles.providerRow, { + [styles.providerRowSelected]: isSelected, + })} + > + <Text size="1" className={styles.providerCellPrimary}> + {variant.tag ?? variant.name ?? variant.id} + </Text> + <Text size="1"> + {variant.context_length + ? formatContextSize(variant.context_length) + : "–"} + </Text> + <Text size="1"> + {variant.max_output_tokens + ? formatContextSize(variant.max_output_tokens) + : "–"} + </Text> + <Text size="1">{formatPrice(variant.pricing?.prompt)}</Text> + <Text size="1">{formatPrice(variant.pricing?.generated)}</Text> + <Text size="1"> + {formatPrice(variant.pricing?.cache_read)} /{" "} + {formatPrice(variant.pricing?.cache_creation)} + </Text> + <Text size="1"> + {typeof variant.latency_last_30m === "number" + ? `${variant.latency_last_30m.toFixed(2)}s` + : "–"} + </Text> + <Text size="1"> + {typeof variant.throughput_last_30m === "number" + ? `${variant.throughput_last_30m.toFixed(0)} tps` + : "–"} + </Text> + <Text size="1"> + {typeof variant.uptime_last_30m === "number" + ? `${variant.uptime_last_30m.toFixed(0)}%` + : "–"} + </Text> + <Text size="1" className={styles.providerCellCaps}> + {variant.supported_parameters?.length + ? variant.supported_parameters.join(", ") + : "–"} + </Text> + <Button + size="1" + variant={isSelected ? 
"solid" : "soft"} + disabled={isSelected || isReadonlyProvider || isLoading} + onClick={(event) => { + event.stopPropagation(); + void handleProviderSelect(variant.id); + }} + > + {isSelected ? "Selected" : "Select"} + </Button> + </div> + ); + }; + + const handleCardClick = useCallback(() => { + if (!hasProviderRouting) return; + setDetailsOpen((prev) => !prev); + }, [hasProviderRouting]); + + return ( + <Card + className={classNames({ [styles.disabledCard]: isLoading })} + onClick={handleCardClick} + style={{ cursor: hasProviderRouting ? "pointer" : "default" }} + > + <Flex align="center" justify="between" gap="3"> + <Flex direction="column" gap="1" style={{ flex: 1, minWidth: 0 }}> + <Flex gap="2" align="center" wrap="wrap"> + <Text + as="span" + size="2" + weight="medium" + style={{ + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + }} + > + {model.display_name ?? model.id} + </Text> + {model.is_custom && ( + <Badge size="1" color="purple"> + Custom + </Badge> + )} + </Flex> + + <Flex gap="2" align="center" wrap="wrap"> + <Tooltip + content={`Context window: ${model.n_ctx.toLocaleString()} tokens`} + > + <Text as="span" size="1" color="gray"> + 📏 {formatContextSize(model.n_ctx)} + </Text> + </Tooltip> + {model.supports_tools && ( + <Tooltip content="Supports tool/function calling"> + <Text as="span" size="1" color="gray"> + 🔧 + </Text> + </Tooltip> + )} + {model.supports_multimodality && ( + <Tooltip content="Supports images/vision"> + <Text as="span" size="1" color="gray"> + 👁️ + </Text> + </Tooltip> + )} + {(!!model.reasoning_effort_options?.length || + !!model.supports_thinking_budget || + !!model.supports_adaptive_thinking_budget) && ( + <Tooltip content="Supports reasoning"> + <Text as="span" size="1" color="gray"> + 🧠 + </Text> + </Tooltip> + )} + {typeof model.max_output_tokens === "number" && + model.max_output_tokens > 0 && ( + <Tooltip + content={`Max output tokens: ${model.max_output_tokens.toLocaleString()}`} + > + <Text as="span" size="1" color="gray"> + ✂️ {formatContextSize(model.max_output_tokens)} out + </Text> + </Tooltip> + )} + {model.pricing && ( + <Tooltip content="Pricing per 1M tokens (input/output)"> + <Text as="span" size="1" color="gray"> + 💲 ${model.pricing.prompt.toFixed(2)}/$ + {model.pricing.generated.toFixed(2)} + </Text> + </Tooltip> + )} + </Flex> + + {hasProviderRouting && ( + <RadixCollapsible.Root + open={detailsOpen} + onOpenChange={setDetailsOpen} + > + <RadixCollapsible.Content className={styles.providerPanel}> + <Text as="span" size="1" color="gray"> + Selecting a provider will enable the model automatically. + </Text> + {resolvedProviderVariants.length > 0 ? 
( + <div className={styles.providerTableWrap}> + <div className={styles.providerHeaderRow}> + <Text size="1">Provider</Text> + <Text size="1">Context</Text> + <Text size="1">Max out</Text> + <Text size="1">Input</Text> + <Text size="1">Output</Text> + <Text size="1">Cache R/W</Text> + <Text size="1">Latency</Text> + <Text size="1">Throughput</Text> + <Text size="1">Uptime</Text> + <Text size="1">Capabilities</Text> + <Text size="1">Action</Text> + </div> + <div + className={classNames(styles.providerRow, { + [styles.providerRowSelected]: + optimisticSelectedProvider === "", + })} + > + <Text size="1" className={styles.providerCellPrimary}> + Auto + </Text> + <Text size="1">–</Text> + <Text size="1">–</Text> + <Text size="1">–</Text> + <Text size="1">–</Text> + <Text size="1">–</Text> + <Text size="1">–</Text> + <Text size="1">–</Text> + <Text size="1">–</Text> + <Text size="1">–</Text> + <Button + size="1" + variant={ + optimisticSelectedProvider === "" ? "solid" : "soft" + } + disabled={ + optimisticSelectedProvider === "" || + isReadonlyProvider || + isLoading + } + onClick={(event) => { + event.stopPropagation(); + void handleProviderSelect(""); + }} + > + {optimisticSelectedProvider === "" + ? "Selected" + : "Select"} + </Button> + </div> + {resolvedProviderVariants.map(renderProviderRow)} + </div> + ) : ( + <div className={styles.providerTableWrap}> + <Flex direction="column" gap="2"> + <Flex align="center" justify="between" gap="2"> + <Text size="1" className={styles.providerCellPrimary}> + Auto + </Text> + <Button + size="1" + variant={ + optimisticSelectedProvider === "" ? "solid" : "soft" + } + disabled={ + optimisticSelectedProvider === "" || + isReadonlyProvider || + isLoading + } + onClick={(event) => { + event.stopPropagation(); + void handleProviderSelect(""); + }} + > + {optimisticSelectedProvider === "" + ? "Selected" + : "Select"} + </Button> + </Flex> + {resolvedAvailableProviders.length === 0 && ( + <Text size="1" color="gray"> + No provider routing data available. + </Text> + )} + {resolvedAvailableProviders.map((provider) => { + const isSelected = + optimisticSelectedProvider === provider; + return ( + <Flex + key={provider} + align="center" + justify="between" + gap="2" + > + <Text + size="1" + className={styles.providerCellPrimary} + > + {provider} + </Text> + <Button + size="1" + variant={isSelected ? "solid" : "soft"} + disabled={ + isSelected || isReadonlyProvider || isLoading + } + onClick={(event) => { + event.stopPropagation(); + void handleProviderSelect(provider); + }} + > + {isSelected ? 
"Selected" : "Select"} + </Button> + </Flex> + ); + })} + </Flex> + </div> + )} + </RadixCollapsible.Content> + </RadixCollapsible.Root> + )} + </Flex> + + <Flex align="center" gap="2"> + {model.is_custom && !isReadonlyProvider && ( + <Tooltip content="Remove custom model"> + <IconButton + size="1" + variant="ghost" + color="red" + onClick={(event) => { + event.stopPropagation(); + void handleRemove(); + }} + disabled={isLoading} + > + <TrashIcon /> + </IconButton> + </Tooltip> + )} + <Switch + size="1" + checked={optimisticEnabled} + disabled={isReadonlyProvider || isLoading} + onClick={(event) => event.stopPropagation()} + onCheckedChange={(checked) => void handleToggle(checked)} + /> + </Flex> + </Flex> + </Card> + ); +}; diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/ModelCard.module.css b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/ModelCard.module.css index e61fee90d..a49bfdfc5 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/ModelCard.module.css +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/ModelCard.module.css @@ -3,3 +3,72 @@ pointer-events: none; transition: 0.15s ease-in-out; } + +.providerToggle { + justify-content: space-between; + width: 100%; + padding: var(--space-1) var(--space-2); +} + +.providerChevron { + margin-left: var(--space-2); +} + +.providerPanel { + margin-top: var(--space-2); + display: flex; + flex-direction: column; + gap: var(--space-2); +} + +.providerTableWrap { + composes: scrollbarThin from "../../../../components/shared/scrollbar.module.css"; + border: 1px solid var(--gray-a4); + border-radius: var(--radius-3); + padding: var(--space-2); + overflow-x: auto; +} + +.providerHeaderRow, +.providerRow { + display: grid; + grid-template-columns: 1.2fr 0.8fr 0.8fr 0.7fr 0.7fr 0.9fr 0.8fr 0.9fr 0.7fr 1.3fr 0.7fr; + align-items: center; + gap: var(--space-2); + min-width: 900px; +} + +.providerHeaderRow { + padding-bottom: var(--space-2); + border-bottom: 1px solid var(--gray-a4); + color: var(--gray-11); + font-weight: var(--font-weight-medium); +} + +.providerRow { + padding: var(--space-2) 0; + border-bottom: 1px solid var(--gray-a2); +} + +.providerRow:last-child { + border-bottom: none; +} + +.providerRowSelected { + background: var(--gray-a2); + border-radius: var(--radius-3); + padding-left: var(--space-2); + padding-right: var(--space-2); +} + +.providerCellPrimary { + font-weight: var(--font-weight-medium); + color: var(--gray-12); +} + +.providerCellCaps { + color: var(--gray-11); + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/ProviderModelsList.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/ProviderModelsList.tsx index c392d2c42..5ac2b3561 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/ProviderModelsList.tsx +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/ProviderModelsList.tsx @@ -1,173 +1,222 @@ -import { useCallback, useMemo, type FC } from "react"; -import { Flex, Heading, Separator, Text } from "@radix-ui/themes"; - -import type { ProviderFormProps } from "../ProviderForm"; +import { useMemo, useState, type FC } from "react"; +import { + Badge, + Button, + Callout, + Flex, + Heading, + Separator, + Text, + TextField, +} from "@radix-ui/themes"; +import { PlusIcon, InfoCircledIcon } from 
"@radix-ui/react-icons"; + +import type { ProviderListItem } from "../../../../services/refact"; +import { + useGetAvailableModelsQuery, + useGetOpenRouterAccountInfoQuery, +} from "../../../../services/refact"; +import { toPascalCase } from "../../../../utils/toPascalCase"; import { Spinner } from "../../../../components/Spinner"; -import { ModelCard } from "./ModelCard"; -import { AddModelButton } from "./components"; - -import { useGetModelsByProviderNameQuery } from "../../../../hooks/useModelsQuery"; -import { ModelsResponse, useGetCapsQuery } from "../../../../services/refact"; -import { groupModelsWithPricing } from "./utils/groupModelsWithPricing"; +import { AvailableModelCard } from "./AvailableModelCard"; +import { AddCustomModelModal } from "./AddCustomModelModal"; export type ProviderModelsListProps = { - provider: ProviderFormProps["currentProvider"]; -}; - -const NoModelsText: FC = () => { - return ( - <Text as="span" size="2" color="gray"> - No models available, but you can add one by clicking &apos;Add model&apos; - </Text> - ); + provider: ProviderListItem; }; export const ProviderModelsList: FC<ProviderModelsListProps> = ({ provider, }) => { + const [searchQuery, setSearchQuery] = useState(""); const { data: modelsData, isSuccess, isLoading, - } = useGetModelsByProviderNameQuery({ - providerName: provider.name, - }); - - // Fetch capabilities & pricing; UI will gracefully degrade if this fails. - const { data: capsData, isError: capsError } = useGetCapsQuery(undefined); - - const getModelNames = useCallback((modelsData: ModelsResponse) => { - const currentChatModelNames = modelsData.chat_models.map((m) => m.name); - const currentCompletionModelNames = modelsData.completion_models.map( - (m) => m.name, - ); - - return { - currentChatModelNames, - currentCompletionModelNames, - }; - }, []); - - // Compute groups early so hooks are always called in the same order - const chatGroups = useMemo( - () => - modelsData?.chat_models - ? groupModelsWithPricing(modelsData.chat_models, { - caps: capsError ? undefined : capsData, - modelType: "chat", - }) - : [], - [modelsData?.chat_models, capsData, capsError], + isError, + error, + } = useGetAvailableModelsQuery({ providerName: provider.name }); + + const [isAddModalOpen, setIsAddModalOpen] = useState(false); + const { data: openRouterAccount } = useGetOpenRouterAccountInfoQuery( + undefined, + { + skip: provider.name !== "openrouter", + }, ); - const completionGroups = useMemo( - () => - modelsData?.completion_models - ? groupModelsWithPricing(modelsData.completion_models, { - caps: capsError ? undefined : capsData, - modelType: "completion", - }) - : [], - [modelsData?.completion_models, capsData, capsError], - ); + const filteredModels = useMemo(() => { + if (!modelsData?.models) return []; + const query = searchQuery.trim().toLowerCase(); + if (!query) return modelsData.models; + return modelsData.models.filter((model) => { + const name = (model.display_name ?? model.id).toLowerCase(); + const id = model.id.toLowerCase(); + return name.includes(query) || id.includes(query); + }); + }, [modelsData?.models, searchQuery]); + + const groupedByFamily = useMemo(() => { + if (provider.name !== "openrouter") return null; + const groups = new Map<string, typeof filteredModels>(); + + filteredModels.forEach((model) => { + const family = model.id.includes("/") ? model.id.split("/")[0] : "other"; + const entry = groups.get(family) ?? 
[]; + entry.push(model); + groups.set(family, entry); + }); + + return Array.from(groups.entries()).sort(([a], [b]) => a.localeCompare(b)); + }, [filteredModels, provider.name]); if (isLoading) return <Spinner spinning />; - if (!isSuccess) return <div>Something went wrong :/</div>; - - const { chat_models, completion_models } = modelsData; + if (isError) { + const err = error as + | { status?: unknown; data?: { detail?: unknown } } + | undefined; + const errorMessage = err?.status + ? `${String(err.status)}: ${ + err.data?.detail ? String(err.data.detail) : "Unknown error" + }` + : "Failed to load models"; + + return ( + <Callout.Root color="red"> + <Callout.Icon> + <InfoCircledIcon /> + </Callout.Icon> + <Callout.Text>Failed to load models: {errorMessage}</Callout.Text> + </Callout.Root> + ); + } + + if (!isSuccess) { + return ( + <Callout.Root color="orange"> + <Callout.Icon> + <InfoCircledIcon /> + </Callout.Icon> + <Callout.Text> + No model data available. Make sure the provider is properly + configured. + </Callout.Text> + </Callout.Root> + ); + } - const { currentChatModelNames, currentCompletionModelNames } = - getModelNames(modelsData); + const totalModels = modelsData.models.length; + const enabledCount = modelsData.models.filter( + (model) => model.enabled, + ).length; return ( - <Flex direction="column" gap="2"> - <Heading as="h3" size="3"> - Models list - </Heading> + <Flex direction="column" gap="3" mt="4"> <Separator size="4" /> - {/* Chat models section */} - <Heading as="h6" size="2" my="2"> - Chat Models - </Heading> - - {chat_models.length > 0 ? ( - chatGroups.map((group) => ( - <Flex key={group.id} direction="column" gap="1" my="1"> - {chatGroups.length > 1 && ( - <Text as="span" size="1" color="gray" weight="medium"> - {group.title} - {group.description ? ` — ${group.description}` : ""} - </Text> - )} - {group.models.map((m) => ( - <ModelCard - key={`${m.name}_chat`} - model={m} - providerName={provider.name} - modelType="chat" - isReadonlyProvider={provider.readonly} - currentModelNames={currentChatModelNames} - /> - ))} - </Flex> - )) - ) : ( - <NoModelsText /> + <Flex align="center" justify="between" gap="3" wrap="wrap"> + <Flex align="center" gap="2" wrap="wrap"> + <Heading as="h3" size="3"> + Available Models + </Heading> + <Badge size="1" color="gray"> + {enabledCount}/{totalModels} enabled + </Badge> + <TextField.Root + size="1" + placeholder="Search models" + value={searchQuery} + onChange={(event) => setSearchQuery(event.target.value)} + style={{ minWidth: 180 }} + /> + </Flex> + + {!provider.readonly && ( + <Button + size="1" + variant="soft" + onClick={() => setIsAddModalOpen(true)} + > + <PlusIcon /> Add Custom Model + </Button> + )} + </Flex> + + {modelsData.error && ( + <Callout.Root color="orange" size="1"> + <Callout.Icon> + <InfoCircledIcon /> + </Callout.Icon> + <Callout.Text size="1">{modelsData.error}</Callout.Text> + </Callout.Root> )} - {!provider.readonly && ( - <AddModelButton - modelType="chat" - providerName={provider.name} - currentModelNames={currentChatModelNames} - /> + {provider.name === "openrouter" && openRouterAccount?.data && ( + <Callout.Root color="blue" size="1"> + <Callout.Icon> + <InfoCircledIcon /> + </Callout.Icon> + <Callout.Text size="1"> + OpenRouter balance:{" "} + {openRouterAccount.data.remaining?.toFixed(2) ?? "0.00"} + {" / "} + {openRouterAccount.data.limit?.toFixed(2) ?? "0.00"} USD + {openRouterAccount.data.key_label + ? 
` · Key: ${openRouterAccount.data.key_label}` + : ""} + </Callout.Text> + </Callout.Root> )} - {/* Completion models section */} - {provider.supports_completion && ( - <> - <Heading as="h6" size="2" my="2"> - Completion Models - </Heading> - {completion_models.length > 0 ? ( - completionGroups.map((group) => ( - <Flex key={group.id} direction="column" gap="1" my="1"> - {completionGroups.length > 1 && ( - <Text as="span" size="1" color="gray" weight="medium"> - {group.title} - {group.description ? ` — ${group.description}` : ""} - </Text> - )} - {group.models.map((m) => ( - <ModelCard - key={`${m.name}_completion`} - model={m} - providerName={provider.name} - modelType="completion" - isReadonlyProvider={provider.readonly} - currentModelNames={currentCompletionModelNames} - /> - ))} - </Flex> - )) - ) : ( - <NoModelsText /> - )} - + {filteredModels.length === 0 ? ( + <Flex direction="column" align="center" gap="2" py="4"> + <Text as="span" size="2" color="gray"> + {totalModels === 0 + ? "No models available for this provider." + : "No models match your search."} + </Text> {!provider.readonly && ( - <AddModelButton - modelType="completion" - providerName={provider.name} - currentModelNames={currentCompletionModelNames} - /> + <Text as="span" size="1" color="gray"> + Click &quot;Add Custom Model&quot; to define your own. + </Text> )} - </> + </Flex> + ) : ( + <Flex direction="column" gap="2"> + {groupedByFamily + ? groupedByFamily.map(([family, group]) => ( + <Flex key={family} direction="column" gap="2"> + <Text as="span" size="1" color="gray" weight="medium" mt="2"> + {toPascalCase(family)} · {group.length} + </Text> + {group.map((model) => ( + <AvailableModelCard + key={model.id} + model={model} + providerName={provider.name} + isReadonlyProvider={provider.readonly} + /> + ))} + </Flex> + )) + : filteredModels.map((model) => ( + <AvailableModelCard + key={model.id} + model={model} + providerName={provider.name} + isReadonlyProvider={provider.readonly} + /> + ))} + </Flex> )} - {/* Embedding model could be handled in a similar way in the future */} + <AddCustomModelModal + providerName={provider.name} + isOpen={isAddModalOpen} + onClose={() => setIsAddModalOpen(false)} + /> </Flex> ); }; diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/components/CapabilityIcons.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/components/CapabilityIcons.tsx index 5202d40d5..3978476cc 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/components/CapabilityIcons.tsx +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/components/CapabilityIcons.tsx @@ -5,7 +5,6 @@ import { ImageIcon, CursorArrowIcon, RocketIcon, - LightningBoltIcon, GearIcon, } from "@radix-ui/react-icons"; import type { ModelCapabilities } from "../utils/groupModelsWithPricing"; @@ -46,21 +45,13 @@ export const CapabilityIcons: FC<CapabilityIconsProps> = ({ <RocketIcon style={iconStyle} color="var(--gray-11)" /> </span> )} - {capabilities.supportsReasoning && ( - <span - title={`Reasoning: ${capabilities.supportsReasoning}${ - capabilities.supportsBoostReasoning ? 
" (boostable)" : "" - }`} - > + {(!!capabilities.reasoningEffortOptions?.length || + !!capabilities.supportsThinkingBudget || + !!capabilities.supportsAdaptiveThinkingBudget) && ( + <span title="Reasoning"> <ChatBubbleIcon style={iconStyle} color="var(--blue-11)" /> </span> )} - {capabilities.supportsBoostReasoning && - !capabilities.supportsReasoning && ( - <span title="Boost reasoning"> - <LightningBoltIcon style={iconStyle} color="var(--amber-11)" /> - </span> - )} </Flex> ); }; diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/components/ModelCardPopup.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/components/ModelCardPopup.tsx index 216a2fa93..dc29fcfdd 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/components/ModelCardPopup.tsx +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/components/ModelCardPopup.tsx @@ -16,6 +16,7 @@ import { } from "../../../../../hooks/useModelsQuery"; import { FormField } from "./FormField"; +import { FormSelect } from "./FormSelect"; import { CapabilityBadge } from "./CapabilityBadge"; import type { @@ -25,21 +26,11 @@ import type { Model, ModelType, SimplifiedModel, - SupportsReasoningStyle, } from "../../../../../services/refact"; -import { extractHumanReadableReasoningType } from "../utils"; import { useEffectOnce } from "../../../../../hooks"; -import { FormSelect } from "./FormSelect"; import { Spinner } from "../../../../../components/Spinner"; -const SUPPORTED_REASONING_STYLES: SupportsReasoningStyle[] = [ - "openai", - "deepseek", - "anthropic", - null, -]; - export type ModelCardPopupProps = { minifiedModel?: SimplifiedModel; isOpen: boolean; @@ -374,28 +365,47 @@ const ChatModelFields: FC<ChatModelFieldsProps> = ({ .split("") .map((s) => (s === "." ? undefined : s)); - if (value > 1 || digits.length > 8) { - e.target.value = "1"; + if (value > 2 || digits.length > 8) { + e.target.value = "2"; } setEditedModelData({ ...editedModelData, type: "chat", default_temperature: - e.target.value === "" ? null : Math.min(parseFloat(e.target.value), 1), + e.target.value === "" ? null : Math.min(parseFloat(e.target.value), 2), + }); + }; + + const handleFrequencyPenaltyChange = ( + e: React.ChangeEvent<HTMLInputElement>, + ) => { + if (!editedModelData) return; + const value = parseFloat(e.target.value); + + if (value < -2 || value > 2) { + e.target.value = Math.max(-2, Math.min(2, value)).toString(); + } + + setEditedModelData({ + ...editedModelData, + type: "chat", + default_frequency_penalty: + e.target.value === "" + ? null + : Math.max(-2, Math.min(2, parseFloat(e.target.value))), }); }; - const handleReasoningStyleChange = (value: string) => { + const handleMaxTokensChange = (e: React.ChangeEvent<HTMLInputElement>) => { if (!editedModelData) return; + const value = parseInt(e.target.value, 10); setEditedModelData({ ...editedModelData, type: "chat", - supports_boost_reasoning: - value === "null" ? false : editedModelData.supports_boost_reasoning, - supports_reasoning: - value === "null" ? null : (value as SupportsReasoningStyle), + default_max_tokens: + e.target.value === "" || isNaN(value) ? null : Math.max(0, value), }); }; @@ -420,20 +430,27 @@ const ChatModelFields: FC<ChatModelFieldsProps> = ({ <FormField label="Default Temperature" value={editedModelData.default_temperature?.toString() ?? 
""} - placeholder="Default temperature" + placeholder="Default temperature (0-2)" type="number" - max="1" + max="2" onChange={handleTemperatureChange} /> + <FormField + label="Default Frequency Penalty" + value={editedModelData.default_frequency_penalty?.toString() ?? ""} + placeholder="Default frequency penalty (-2 to 2)" + type="number" + onChange={handleFrequencyPenaltyChange} + /> + <FormField + label="Default Max Tokens" + value={editedModelData.default_max_tokens?.toString() ?? ""} + placeholder="Default max tokens" + type="number" + onChange={handleMaxTokensChange} + /> <Flex direction="column" gap="2"> - <FormSelect - label="Reasoning Style" - value={editedModelData.supports_reasoning ?? "null"} - onValueChange={handleReasoningStyleChange} - options={SUPPORTED_REASONING_STYLES} - optionTransformer={extractHumanReadableReasoningType} - /> <Text as="div" size="2" weight="bold"> Capabilities </Text> @@ -458,13 +475,11 @@ const ChatModelFields: FC<ChatModelFieldsProps> = ({ enabled={editedModelData.supports_agent} onClick={() => toggleCapability("supports_agent")} /> - {editedModelData.supports_reasoning && ( - <CapabilityBadge - name="Boost Reasoning" - enabled={!!editedModelData.supports_boost_reasoning} - onClick={() => toggleCapability("supports_boost_reasoning")} - /> - )} + <CapabilityBadge + name="Thinking Budget" + enabled={!!editedModelData.supports_thinking_budget} + onClick={() => toggleCapability("supports_thinking_budget")} + /> </Flex> </Flex> </> diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/index.ts b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/index.ts index 962a6b817..2329f748a 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/index.ts +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/index.ts @@ -1 +1,3 @@ export { ProviderModelsList } from "./ProviderModelsList"; +export { AvailableModelCard } from "./AvailableModelCard"; +export { AddCustomModelModal } from "./AddCustomModelModal"; diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/utils/extractHumanReadableReasoningType.ts b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/utils/extractHumanReadableReasoningType.ts index e1b2020aa..cde7b9aa9 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/utils/extractHumanReadableReasoningType.ts +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/utils/extractHumanReadableReasoningType.ts @@ -1,26 +1,11 @@ -import { SupportsReasoningStyle } from "../../../../../services/refact"; -import { BEAUTIFUL_PROVIDER_NAMES } from "../../../constants"; - -export function isSupportsReasoningStyle( - data: string | null, -): data is SupportsReasoningStyle { +export function hasReasoningSupport(model: { + reasoning_effort_options?: string[] | null; + supports_thinking_budget?: boolean; + supports_adaptive_thinking_budget?: boolean; +}): boolean { return ( - data === "openai" || - data === "anthropic" || - data === "deepseek" || - data === null + !!model.reasoning_effort_options?.length || + !!model.supports_thinking_budget || + !!model.supports_adaptive_thinking_budget ); } - -export function extractHumanReadableReasoningType( - reasoningType: string | null, -) { - if (!isSupportsReasoningStyle(reasoningType)) return null; - if (!reasoningType) return null; - - const maybeReadableReasoningType = BEAUTIFUL_PROVIDER_NAMES[reasoningType]; - - return 
maybeReadableReasoningType - ? maybeReadableReasoningType - : reasoningType; -} diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/utils/groupModelsWithPricing.ts b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/utils/groupModelsWithPricing.ts index 652a66215..05758d4f8 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/utils/groupModelsWithPricing.ts +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderModelsList/utils/groupModelsWithPricing.ts @@ -22,8 +22,9 @@ export type ModelCapabilities = { supportsMultimodality?: boolean; supportsClicks?: boolean; supportsAgent?: boolean; - supportsReasoning?: string | null; - supportsBoostReasoning?: boolean; + reasoningEffortOptions?: string[] | null; + supportsThinkingBudget?: boolean; + supportsAdaptiveThinkingBudget?: boolean; }; export type ModelGroup = { @@ -46,39 +47,31 @@ export function formatContextWindow(nCtx: number): string { return nCtx.toString(); } -/** - * Format pricing to compact string (in coins, $1 = 1000 coins) - */ export function formatPricing(cost: CapCost, compact = true): string { - // Convert dollars to coins ($1 = 1000 coins) - const toCoins = (n?: number) => - typeof n === "number" && Number.isFinite(n) ? Math.round(n * 1000) : null; - - const promptCoins = toCoins(cost.prompt); - const generatedCoins = toCoins(cost.generated); - - const fmt = (coins: number | null) => - coins !== null ? coins.toString() : "–"; + const fmt = (n?: number) => { + if (typeof n !== "number" || !Number.isFinite(n)) return "–"; + if (n >= 1) return `$${n.toFixed(2)}`; + if (n >= 0.01) return `$${n.toFixed(2)}`; + return `$${n.toFixed(3)}`; + }; if (compact) { - // Compact format for card display: "1000/5000 ⓒ" (prompt/output in coins) - return `${fmt(promptCoins)}/${fmt(generatedCoins)} ⓒ`; + return `${fmt(cost.prompt)}/${fmt(cost.generated)}`; } - // Detailed format for tooltip/popup const parts = [ - `prompt: ${fmt(promptCoins)} ⓒ`, - `output: ${fmt(generatedCoins)} ⓒ`, + `input: ${fmt(cost.prompt)}`, + `output: ${fmt(cost.generated)}`, ]; - const cacheReadCoins = toCoins(cost.cache_read); - const cacheCreationCoins = toCoins(cost.cache_creation); - - if (cacheReadCoins !== null) { - parts.push(`cache read: ${fmt(cacheReadCoins)} ⓒ`); + if (typeof cost.cache_read === "number" && Number.isFinite(cost.cache_read)) { + parts.push(`cache read: ${fmt(cost.cache_read)}`); } - if (cacheCreationCoins !== null) { - parts.push(`cache create: ${fmt(cacheCreationCoins)} ⓒ`); + if ( + typeof cost.cache_creation === "number" && + Number.isFinite(cost.cache_creation) + ) { + parts.push(`cache create: ${fmt(cost.cache_creation)}`); } return parts.join(" • ") + " per 1M tokens"; @@ -86,25 +79,49 @@ export function formatPricing(cost: CapCost, compact = true): string { /** * Try to find the pricing key in caps.metadata.pricing that corresponds to a given model. - * Based on actual /caps response, keys are bare model names (e.g., "gpt-4.1") + * Backend inserts pricing under both fully-qualified keys (provider/model) and bare model names. 
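+ * Lookup order (mirrors the fallbacks below): exact key, then "provider/model",
+ * then the bare name after the last "/" (e.g. "openai/gpt-4o" -> "gpt-4o"),
+ * then the last two segments for multi-slash ids such as "openrouter/anthropic/claude-3-5-sonnet".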
*/ function pickPricingKey(args: { caps: CapsResponse; modelName: string; + providerName?: string; }): string | null { - const { caps, modelName } = args; + const { caps, modelName, providerName } = args; const pricing = caps.metadata?.pricing; if (!pricing) return null; - // Try exact match first (most common case) - if (Object.prototype.hasOwnProperty.call(pricing, modelName)) { + const hasKey = (key: string) => + Object.prototype.hasOwnProperty.call(pricing, key); + + // 1. Try exact match first (handles both bare and qualified names) + if (hasKey(modelName)) { return modelName; } - // Try without "refact/" prefix if present - const nameWithoutProvider = modelName.replace(/^refact\//, ""); - if (Object.prototype.hasOwnProperty.call(pricing, nameWithoutProvider)) { - return nameWithoutProvider; + // 2. Try fully-qualified key if we have provider context + if (providerName) { + const qualifiedKey = `${providerName}/${modelName}`; + if (hasKey(qualifiedKey)) { + return qualifiedKey; + } + } + + // 3. Try stripping any provider prefix (e.g., "openai/gpt-4o" -> "gpt-4o") + if (modelName.includes("/")) { + const bareModel = modelName.split("/").pop(); + if (bareModel && hasKey(bareModel)) { + return bareModel; + } + } + + // 4. For multi-slash names (e.g., "openrouter/anthropic/claude-3-5-sonnet"), + // try the last two segments as a key + const segments = modelName.split("/"); + if (segments.length > 2) { + const lastTwoSegments = segments.slice(-2).join("/"); + if (hasKey(lastTwoSegments)) { + return lastTwoSegments; + } } return null; @@ -123,8 +140,9 @@ function extractCapabilities( supportsMultimodality: capsModel.supports_multimodality, supportsClicks: capsModel.supports_clicks, supportsAgent: capsModel.supports_agent, - supportsReasoning: capsModel.supports_reasoning, - supportsBoostReasoning: capsModel.supports_boost_reasoning, + reasoningEffortOptions: capsModel.reasoning_effort_options, + supportsThinkingBudget: capsModel.supports_thinking_budget, + supportsAdaptiveThinkingBudget: capsModel.supports_adaptive_thinking_budget, }; } diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/ProviderOAuth.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderOAuth.tsx new file mode 100644 index 000000000..f28c65676 --- /dev/null +++ b/refact-agent/gui/src/features/Providers/ProviderForm/ProviderOAuth.tsx @@ -0,0 +1,336 @@ +import React, { useCallback, useEffect, useRef, useState } from "react"; +import { Button, Flex, Text, TextField } from "@radix-ui/themes"; +import { + useOauthStartMutation, + useOauthExchangeMutation, + useOauthLogoutMutation, + providersApi, +} from "../../../services/refact"; +import { useAppDispatch } from "../../../hooks"; +import { useOpenUrl } from "../../../hooks/useOpenUrl"; + +const PROVIDERS_WITH_AUTO_CALLBACK = ["openai_codex"]; + +const PROVIDER_LOGIN_LABELS: Record<string, string> = { + claude_code: "Login with Anthropic", + openai_codex: "Login with OpenAI", +}; + +type ProviderOAuthProps = { + providerName: string; + oauthConnected: boolean; + authStatus: string; +}; + +export const ProviderOAuth: React.FC<ProviderOAuthProps> = ({ + providerName, + oauthConnected, + authStatus, +}) => { + const dispatch = useAppDispatch(); + const openUrl = useOpenUrl(); + const [oauthStart] = useOauthStartMutation(); + const [oauthExchange] = useOauthExchangeMutation(); + const [oauthLogout] = useOauthLogoutMutation(); + + const [sessionId, setSessionId] = useState<string | null>(null); + const [authorizeUrl, setAuthorizeUrl] = useState<string | 
null>(null); + const [code, setCode] = useState(""); + const [error, setError] = useState<string | null>(null); + const [isLoading, setIsLoading] = useState(false); + const [waitingForCallback, setWaitingForCallback] = useState(false); + const pollTimerRef = useRef<ReturnType<typeof setInterval> | null>(null); + + const isAutoCallback = PROVIDERS_WITH_AUTO_CALLBACK.includes(providerName); + const loginLabel = PROVIDER_LOGIN_LABELS[providerName] || "Login"; + + const invalidateProvider = useCallback(() => { + dispatch( + providersApi.util.invalidateTags([ + { type: "PROVIDER", id: providerName }, + { type: "PROVIDERS", id: "LIST" }, + { type: "AVAILABLE_MODELS", id: providerName }, + ]), + ); + }, [dispatch, providerName]); + + useEffect(() => { + return () => { + if (pollTimerRef.current) { + clearInterval(pollTimerRef.current); + } + }; + }, []); + + const handleStartOAuth = async () => { + setError(null); + setIsLoading(true); + try { + const result = await oauthStart({ providerName, mode: "max" }).unwrap(); + setSessionId(result.session_id); + setAuthorizeUrl(result.authorize_url); + openUrl(result.authorize_url); + + if (isAutoCallback) { + setWaitingForCallback(true); + pollTimerRef.current = setInterval(() => { + invalidateProvider(); + }, 2000); + } + } catch (e) { + setError(e instanceof Error ? e.message : "Failed to start OAuth"); + } finally { + setIsLoading(false); + } + }; + + useEffect(() => { + if (waitingForCallback && oauthConnected) { + setWaitingForCallback(false); + setSessionId(null); + setAuthorizeUrl(null); + if (pollTimerRef.current) { + clearInterval(pollTimerRef.current); + pollTimerRef.current = null; + } + } + }, [waitingForCallback, oauthConnected]); + + // If backend updated auth_status to a terminal error while we were polling, + // stop waiting and let the user see the status. + useEffect(() => { + if (!waitingForCallback) return; + if (!authStatus) return; + if (/failed|error|unavailable|missing/i.test(authStatus)) { + setWaitingForCallback(false); + if (pollTimerRef.current) { + clearInterval(pollTimerRef.current); + pollTimerRef.current = null; + } + } + }, [waitingForCallback, authStatus]); + + const handleExchangeCode = async () => { + if (!sessionId || !code.trim()) return; + setError(null); + setIsLoading(true); + try { + await oauthExchange({ + providerName, + session_id: sessionId, + code: code.trim(), + }).unwrap(); + setSessionId(null); + setAuthorizeUrl(null); + setCode(""); + invalidateProvider(); + } catch (e) { + setError(e instanceof Error ? e.message : "Failed to exchange code"); + } finally { + setIsLoading(false); + } + }; + + const handleLogout = async () => { + setError(null); + setIsLoading(true); + try { + await oauthLogout({ providerName }).unwrap(); + setSessionId(null); + setAuthorizeUrl(null); + setCode(""); + } catch (e) { + setError(e instanceof Error ? 
e.message : "Failed to logout"); + } finally { + setIsLoading(false); + } + }; + + const handleCancel = () => { + setSessionId(null); + setAuthorizeUrl(null); + setCode(""); + setWaitingForCallback(false); + if (pollTimerRef.current) { + clearInterval(pollTimerRef.current); + pollTimerRef.current = null; + } + }; + + if (oauthConnected) { + return ( + <Flex + direction="column" + gap="2" + p="3" + style={{ + border: "1px solid var(--gray-6)", + borderRadius: "var(--radius-2)", + }} + > + <Flex align="center" justify="between"> + <Flex align="center" gap="2"> + <Text size="2" weight="medium" color="green"> + ● Connected + </Text> + <Text size="1" color="gray"> + {authStatus} + </Text> + </Flex> + <Button + variant="ghost" + color="red" + size="1" + disabled={isLoading} + onClick={() => void handleLogout()} + > + Disconnect + </Button> + </Flex> + </Flex> + ); + } + + if (sessionId && authorizeUrl) { + if (isAutoCallback && waitingForCallback) { + return ( + <Flex + direction="column" + gap="2" + p="3" + style={{ + border: "1px solid var(--gray-6)", + borderRadius: "var(--radius-2)", + }} + > + <Text size="2" weight="medium"> + Waiting for authentication... + </Text> + <Text size="1" color="gray"> + Complete the login in the browser window that opened. This page will + update automatically. + </Text> + <Flex gap="2" align="center"> + <Text size="1" color="gray"> + Browser didn&apos;t open?{" "} + <a + href="#" + onClick={(e) => { + e.preventDefault(); + if (authorizeUrl) openUrl(authorizeUrl); + }} + style={{ color: "var(--accent-9)" }} + > + Click here + </a> + </Text> + <Button + variant="ghost" + size="1" + color="gray" + onClick={handleCancel} + > + Cancel + </Button> + </Flex> + {error && ( + <Text size="1" color="red"> + {error} + </Text> + )} + </Flex> + ); + } + + return ( + <Flex + direction="column" + gap="2" + p="3" + style={{ + border: "1px solid var(--gray-6)", + borderRadius: "var(--radius-2)", + }} + > + <Text size="2" weight="medium"> + Paste the authorization code + </Text> + <Text size="1" color="gray"> + A browser window should have opened. Log in and copy the code shown on + the page. + </Text> + <Flex gap="2"> + <TextField.Root + style={{ flex: 1 }} + placeholder="Paste code here..." + value={code} + onChange={(e) => setCode(e.target.value)} + onKeyDown={(e) => { + if (e.key === "Enter") void handleExchangeCode(); + }} + /> + <Button + variant="solid" + disabled={isLoading || !code.trim()} + onClick={() => void handleExchangeCode()} + > + {isLoading ? "Connecting..." : "Connect"} + </Button> + </Flex> + <Flex gap="2" align="center"> + <Text size="1" color="gray"> + Browser didn&apos;t open?{" "} + <a + href="#" + onClick={(e) => { + e.preventDefault(); + if (authorizeUrl) openUrl(authorizeUrl); + }} + style={{ color: "var(--accent-9)" }} + > + Click here + </a> + </Text> + <Button variant="ghost" size="1" color="gray" onClick={handleCancel}> + Cancel + </Button> + </Flex> + {error && ( + <Text size="1" color="red"> + {error} + </Text> + )} + </Flex> + ); + } + + return ( + <Flex + direction="column" + gap="2" + p="3" + style={{ + border: "1px solid var(--gray-6)", + borderRadius: "var(--radius-2)", + }} + > + <Flex align="center" justify="between"> + <Text size="2" weight="medium"> + {loginLabel} + </Text> + <Button + variant="solid" + disabled={isLoading} + onClick={() => void handleStartOAuth()} + > + {isLoading ? "Starting..." 
: "Login"} + </Button> + </Flex> + {error && ( + <Text size="1" color="red"> + {error} + </Text> + )} + </Flex> + ); +}; diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/SchemaField.tsx b/refact-agent/gui/src/features/Providers/ProviderForm/SchemaField.tsx new file mode 100644 index 000000000..c57aa1b68 --- /dev/null +++ b/refact-agent/gui/src/features/Providers/ProviderForm/SchemaField.tsx @@ -0,0 +1,383 @@ +import React, { useState, useCallback, useRef, useEffect } from "react"; +import { + Button, + Flex, + Switch, + Text, + TextField, + TextArea, +} from "@radix-ui/themes"; +import { + ExternalLinkIcon, + EyeOpenIcon, + EyeClosedIcon, + Cross2Icon, + CheckIcon, +} from "@radix-ui/react-icons"; +import styles from "./ProviderForm.module.css"; + +export type SchemaFieldDef = { + key: string; + f_type: string; + f_desc?: string; + f_label?: string; + f_placeholder?: string; + f_default?: string; + f_extra?: boolean; + f_secret?: boolean; + smartlinks?: { sl_label: string; sl_goto: string }[]; +}; + +type FieldSaveState = "idle" | "saving" | "saved" | "error"; + +export type SchemaFieldProps = { + field: SchemaFieldDef; + value: unknown; + disabled?: boolean; + onSave: (key: string, value: unknown) => Promise<void>; +}; + +export const SchemaField: React.FC<SchemaFieldProps> = ({ + field, + value, + disabled = false, + onSave, +}) => { + const isSecret = + field.f_secret === true || + field.key.toLowerCase().includes("key") || + field.key.toLowerCase().includes("token") || + field.key.toLowerCase().includes("secret"); + + if (field.f_type === "boolean") { + return ( + <BooleanField + field={field} + value={value} + disabled={disabled} + onSave={onSave} + /> + ); + } + + if (isSecret) { + return ( + <SecretField + field={field} + value={value} + disabled={disabled} + onSave={onSave} + /> + ); + } + + return ( + <StringField + field={field} + value={value} + disabled={disabled} + onSave={onSave} + /> + ); +}; + +const BooleanField: React.FC<SchemaFieldProps> = ({ + field, + value, + disabled, + onSave, +}) => { + const [saveState, setSaveState] = useState<FieldSaveState>("idle"); + const timerRef = useRef<ReturnType<typeof setTimeout>>(); + useEffect(() => () => clearTimeout(timerRef.current), []); + + const handleChange = useCallback( + async (checked: boolean) => { + setSaveState("saving"); + try { + await onSave(field.key, checked); + setSaveState("saved"); + timerRef.current = setTimeout(() => setSaveState("idle"), 1500); + } catch { + setSaveState("error"); + timerRef.current = setTimeout(() => setSaveState("idle"), 2000); + } + }, + [field.key, onSave], + ); + + return ( + <Flex direction="column" gap="1"> + <Flex align="center" justify="between" gap="3"> + <Flex direction="column" gap="0"> + <label htmlFor={field.key}> + <Text size="2" weight="medium"> + {field.f_label ?? 
field.key} + </Text> + </label> + {field.f_desc && ( + <Text size="1" color="gray"> + {field.f_desc} + </Text> + )} + </Flex> + <Flex align="center" gap="2"> + <SaveIndicator state={saveState} /> + <Switch + id={field.key} + checked={Boolean(value)} + disabled={disabled} + onCheckedChange={(checked) => void handleChange(checked)} + /> + </Flex> + </Flex> + </Flex> + ); +}; + +const SecretField: React.FC<SchemaFieldProps> = ({ + field, + value, + disabled, + onSave, +}) => { + const isMasked = value === "***"; + const [localValue, setLocalValue] = useState(""); + const [revealed, setRevealed] = useState(false); + const [saveState, setSaveState] = useState<FieldSaveState>("idle"); + const [editing, setEditing] = useState(false); + const originalValueRef = useRef(value); + const timerRef = useRef<ReturnType<typeof setTimeout>>(); + useEffect(() => () => clearTimeout(timerRef.current), []); + + useEffect(() => { + originalValueRef.current = value; + if (!editing) { + setLocalValue(""); + } + }, [value, editing]); + + const handleBlur = useCallback(async () => { + if (!editing) return; + if (localValue === "" && isMasked) { + setEditing(false); + return; + } + if (localValue === String(originalValueRef.current)) { + setEditing(false); + return; + } + setSaveState("saving"); + try { + await onSave(field.key, localValue); + setSaveState("saved"); + setEditing(false); + timerRef.current = setTimeout(() => setSaveState("idle"), 1500); + } catch { + setSaveState("error"); + timerRef.current = setTimeout(() => setSaveState("idle"), 2000); + } + }, [editing, localValue, isMasked, field.key, onSave]); + + const handleClear = useCallback(async () => { + setSaveState("saving"); + try { + await onSave(field.key, ""); + setLocalValue(""); + setEditing(false); + setSaveState("saved"); + timerRef.current = setTimeout(() => setSaveState("idle"), 1500); + } catch { + setSaveState("error"); + timerRef.current = setTimeout(() => setSaveState("idle"), 2000); + } + }, [field.key, onSave]); + + const displayValue = editing + ? localValue + : isMasked + ? "" + : String(value ?? ""); + const placeholder = + isMasked && !editing ? "•••••••• (saved)" : field.f_placeholder ?? ""; + + return ( + <Flex direction="column" gap="1"> + <Flex align="center" justify="between"> + <Flex direction="column" gap="0"> + <Text size="2" weight="medium"> + {field.f_label ?? field.key} + </Text> + {field.f_desc && ( + <Text size="1" color="gray"> + {field.f_desc} + </Text> + )} + </Flex> + <Flex align="center" gap="1"> + <SaveIndicator state={saveState} /> + {field.smartlinks?.map((link) => ( + <Button key={link.sl_goto} variant="ghost" size="1" asChild> + <a href={link.sl_goto} target="_blank" rel="noopener noreferrer"> + <ExternalLinkIcon width={12} height={12} /> + <Text size="1">{link.sl_label}</Text> + </a> + </Button> + ))} + </Flex> + </Flex> + <Flex gap="1" align="center"> + <TextField.Root + style={{ flex: 1 }} + id={field.key} + type={revealed ? "text" : "password"} + value={displayValue} + placeholder={placeholder} + disabled={disabled} + onFocus={() => setEditing(true)} + onChange={(e) => setLocalValue(e.target.value)} + onBlur={() => void handleBlur()} + onKeyDown={(e) => { + if (e.key === "Enter") e.currentTarget.blur(); + }} + className={disabled ? styles.disabledField : undefined} + /> + <Button + variant="ghost" + size="1" + onClick={() => setRevealed(!revealed)} + title={revealed ? "Hide" : "Reveal"} + > + {revealed ? 
<EyeClosedIcon /> : <EyeOpenIcon />} + </Button> + {isMasked && !editing && ( + <Button + variant="ghost" + size="1" + color="red" + onClick={() => void handleClear()} + title="Clear saved value" + > + <Cross2Icon /> + </Button> + )} + </Flex> + </Flex> + ); +}; + +const StringField: React.FC<SchemaFieldProps> = ({ + field, + value, + disabled, + onSave, +}) => { + const [localValue, setLocalValue] = useState( + String(value ?? field.f_default ?? ""), + ); + const [saveState, setSaveState] = useState<FieldSaveState>("idle"); + const originalValueRef = useRef(value); + const timerRef = useRef<ReturnType<typeof setTimeout>>(); + useEffect(() => () => clearTimeout(timerRef.current), []); + + useEffect(() => { + originalValueRef.current = value; + setLocalValue(String(value ?? field.f_default ?? "")); + }, [value, field.f_default]); + + const handleBlur = useCallback(async () => { + if (localValue === String(originalValueRef.current ?? "")) return; + setSaveState("saving"); + try { + await onSave(field.key, localValue); + setSaveState("saved"); + timerRef.current = setTimeout(() => setSaveState("idle"), 1500); + } catch { + setSaveState("error"); + timerRef.current = setTimeout(() => setSaveState("idle"), 2000); + } + }, [localValue, field.key, onSave]); + + const isLong = field.f_type === "string_long" || localValue.length > 80; + + return ( + <Flex direction="column" gap="1"> + <Flex align="center" justify="between"> + <Flex direction="column" gap="0"> + <Text size="2" weight="medium"> + {field.f_label ?? field.key} + </Text> + {field.f_desc && ( + <Text size="1" color="gray"> + {field.f_desc} + </Text> + )} + </Flex> + <Flex align="center" gap="1"> + <SaveIndicator state={saveState} /> + {field.smartlinks?.map((link) => ( + <Button key={link.sl_goto} variant="ghost" size="1" asChild> + <a href={link.sl_goto} target="_blank" rel="noopener noreferrer"> + <ExternalLinkIcon width={12} height={12} /> + <Text size="1">{link.sl_label}</Text> + </a> + </Button> + ))} + </Flex> + </Flex> + {isLong ? ( + <TextArea + id={field.key} + value={localValue} + placeholder={field.f_placeholder ?? ""} + disabled={disabled} + onChange={(e) => setLocalValue(e.target.value)} + onBlur={() => void handleBlur()} + className={disabled ? styles.disabledField : undefined} + rows={2} + /> + ) : ( + <TextField.Root + id={field.key} + value={localValue} + placeholder={field.f_placeholder ?? ""} + disabled={disabled} + onChange={(e) => setLocalValue(e.target.value)} + onBlur={() => void handleBlur()} + onKeyDown={(e) => { + if (e.key === "Enter") e.currentTarget.blur(); + }} + className={disabled ? 
styles.disabledField : undefined} + /> + )} + </Flex> + ); +}; + +const SaveIndicator: React.FC<{ state: FieldSaveState }> = ({ state }) => { + switch (state) { + case "idle": + return null; + case "saving": + return ( + <Text size="1" color="gray"> + Saving… + </Text> + ); + case "saved": + return ( + <Flex align="center" gap="1"> + <CheckIcon width={12} height={12} color="var(--green-9)" /> + <Text size="1" color="green"> + Saved + </Text> + </Flex> + ); + case "error": + return ( + <Text size="1" color="red"> + Error + </Text> + ); + } +}; diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/useProviderForm.ts b/refact-agent/gui/src/features/Providers/ProviderForm/useProviderForm.ts index ff4ed57ad..9db12fb74 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/useProviderForm.ts +++ b/refact-agent/gui/src/features/Providers/ProviderForm/useProviderForm.ts @@ -1,52 +1,148 @@ -import isEqual from "lodash.isequal"; import { useCallback, useEffect, useMemo, useState } from "react"; -import type { Provider } from "../../../services/refact"; +import type { ProviderDetailResponse } from "../../../services/refact"; import { useGetConfiguredProvidersQuery, useGetProviderQuery, } from "../../../hooks/useProvidersQuery"; +import { + providersApi, + useGetProviderSchemaQuery, +} from "../../../services/refact"; +import { useAppDispatch } from "../../../hooks"; +import type { SchemaFieldDef } from "./SchemaField"; + +export type ProviderFormValues = { + enabled: boolean; + readonly: boolean; + [key: string]: unknown; +}; + +type ParsedSchema = { + fields: SchemaFieldDef[]; + oauth?: { + supported: boolean; + methods?: { id: string; label: string; description?: string }[]; + }; + description?: string; +}; + +const jsYamlPromise = import("js-yaml"); + +async function parseSchema(yamlStr: string): Promise<ParsedSchema> { + const jsYaml = await jsYamlPromise; + const parsed = jsYaml.load(yamlStr) as Record<string, unknown> | null; + if (!parsed || typeof parsed !== "object") { + return { fields: [] }; + } + + const fields: SchemaFieldDef[] = []; + const rawFields = parsed.fields as + | Record<string, Record<string, unknown>> + | undefined; + if (rawFields && typeof rawFields === "object") { + for (const [key, def] of Object.entries(rawFields)) { + fields.push({ + key, + f_type: String(def.f_type ?? "string"), + f_desc: def.f_desc ? String(def.f_desc) : undefined, + f_label: def.f_label ? String(def.f_label) : undefined, + f_placeholder: def.f_placeholder + ? String(def.f_placeholder) + : undefined, + f_default: def.f_default ? String(def.f_default) : undefined, + f_extra: Boolean(def.f_extra), + f_secret: Boolean(def.f_secret), + smartlinks: Array.isArray(def.smartlinks) + ? def.smartlinks.map((sl: Record<string, unknown>) => ({ + sl_label: String(sl.sl_label ?? ""), + sl_goto: String(sl.sl_goto ?? ""), + })) + : undefined, + }); + } + } + + const oauth = parsed.oauth as ParsedSchema["oauth"] | undefined; + const description = parsed.description + ? 
String(parsed.description) + : undefined; + + return { fields, oauth, description }; +} export function useProviderForm({ providerName }: { providerName: string }) { - const { data: detailedProvider, isSuccess: isProviderLoadedSuccessfully } = - useGetProviderQuery({ - providerName: providerName, - }); + const dispatch = useAppDispatch(); + const { data: providerDetail, isSuccess: isProviderLoadedSuccessfully } = + useGetProviderQuery({ providerName }); + const { data: schemaData } = useGetProviderSchemaQuery({ providerName }); const { data: configuredProviders } = useGetConfiguredProvidersQuery(); - const [formValues, setFormValues] = useState<Provider | null>(null); + const [parsedSchema, setParsedSchema] = useState<ParsedSchema | null>(null); const [areShowingExtraFields, setAreShowingExtraFields] = useState(false); useEffect(() => { - if (detailedProvider) { - setFormValues(detailedProvider); + if (schemaData?.schema) { + void parseSchema(schemaData.schema).then(setParsedSchema); } - }, [detailedProvider]); + }, [schemaData?.schema]); - const shouldSaveButtonBeDisabled = useMemo(() => { - if (!detailedProvider) return true; + const formValues: ProviderFormValues | null = useMemo(() => { + if (!providerDetail) return null; + return { + enabled: providerDetail.enabled, + readonly: providerDetail.readonly, + ...providerDetail.settings, + }; + }, [providerDetail]); - const isProviderConfigured = configuredProviders?.providers.some( - (p) => p.name === providerName, - ); - if (!isProviderConfigured) return false; + const { importantFields, extraFields } = useMemo(() => { + if (!parsedSchema) return { importantFields: [], extraFields: [] }; + const important: SchemaFieldDef[] = []; + const extra: SchemaFieldDef[] = []; + for (const field of parsedSchema.fields) { + if (field.f_extra) { + extra.push(field); + } else { + important.push(field); + } + } + return { importantFields: important, extraFields: extra }; + }, [parsedSchema]); - return detailedProvider.readonly || isEqual(formValues, detailedProvider); - }, [configuredProviders, detailedProvider, formValues, providerName]); + const [updateProvider] = providersApi.useUpdateProviderMutation(); - const handleFormValuesChange = useCallback( - (updatedProviderData: Provider) => { - setFormValues(updatedProviderData); + const handleFieldSave = useCallback( + async (key: string, value: unknown) => { + if (!providerDetail) return; + // Send only the changed field (patch semantics) — backend merges with existing YAML + const response = await updateProvider({ + providerName, + settings: { [key]: value }, + }); + if (response.error) { + throw new Error("Failed to save"); + } + dispatch( + providersApi.util.invalidateTags([ + { type: "PROVIDER", id: providerName }, + { type: "PROVIDERS", id: "LIST" }, + { type: "AVAILABLE_MODELS", id: providerName }, + ]), + ); }, - [], + [providerDetail, providerName, updateProvider, dispatch], ); + const detailedProvider: ProviderDetailResponse | undefined = providerDetail; + return { formValues, - setFormValues, + parsedSchema, + importantFields, + extraFields, areShowingExtraFields, setAreShowingExtraFields, - shouldSaveButtonBeDisabled, - handleFormValuesChange, + handleFieldSave, configuredProviders, detailedProvider, isProviderLoadedSuccessfully, diff --git a/refact-agent/gui/src/features/Providers/ProviderForm/utils.ts b/refact-agent/gui/src/features/Providers/ProviderForm/utils.ts index 3157bd6a4..152b4cb20 100644 --- a/refact-agent/gui/src/features/Providers/ProviderForm/utils.ts +++ 
b/refact-agent/gui/src/features/Providers/ProviderForm/utils.ts @@ -1,4 +1,4 @@ -import type { Provider } from "../../../services/refact"; +import type { ProviderFormValues } from "./useProviderForm"; export type AggregatedProviderFields = { importantFields: Record<string, string | boolean>; @@ -11,6 +11,7 @@ const EXTRA_FIELDS_KEYS = [ "chat_endpoint", "tokenizer_api_key", ]; + const HIDDEN_FIELDS_KEYS = [ "name", "readonly", @@ -18,19 +19,23 @@ const HIDDEN_FIELDS_KEYS = [ "supports_completion", ]; -export function aggregateProviderFields(providerData: Provider) { +export function aggregateProviderFields(providerData: ProviderFormValues) { return Object.entries(providerData).reduce<AggregatedProviderFields>( (acc, [key, value]) => { - const stringValue = value; - if (HIDDEN_FIELDS_KEYS.some((hiddenField) => hiddenField === key)) { return acc; } + if (typeof value === "object" && value !== null) { + return acc; + } + + const fieldValue = value as string | boolean; + if (EXTRA_FIELDS_KEYS.some((extraField) => extraField === key)) { - acc.extraFields[key] = stringValue; + acc.extraFields[key] = fieldValue; } else { - acc.importantFields[key] = stringValue; + acc.importantFields[key] = fieldValue; } return acc; diff --git a/refact-agent/gui/src/features/Providers/ProviderPreview/ProviderPreview.tsx b/refact-agent/gui/src/features/Providers/ProviderPreview/ProviderPreview.tsx index 0a4be0bc5..0c8e95721 100644 --- a/refact-agent/gui/src/features/Providers/ProviderPreview/ProviderPreview.tsx +++ b/refact-agent/gui/src/features/Providers/ProviderPreview/ProviderPreview.tsx @@ -3,38 +3,46 @@ import { Flex, Heading } from "@radix-ui/themes"; import { ProviderForm } from "../ProviderForm"; -import { useProviderPreview } from "./useProviderPreview"; import { getProviderName } from "../getProviderName"; -import type { SimplifiedProvider } from "../../../services/refact"; +import type { ProviderListItem } from "../../../services/refact"; import { DeletePopover } from "../../../components/DeletePopover"; +import { useDeleteProviderMutation } from "../../../hooks/useProvidersQuery"; +import { useAppDispatch } from "../../../hooks"; +import { setInformation } from "../../Errors/informationSlice"; +import { providersApi } from "../../../services/refact"; + +const UNDELETABLE_PROVIDERS = ["refact", "refact_self_hosted"]; export type ProviderPreviewProps = { - configuredProviders: SimplifiedProvider< - "name" | "enabled" | "readonly" | "supports_completion" - >[]; - currentProvider: SimplifiedProvider< - "name" | "enabled" | "readonly" | "supports_completion" - >; - handleSetCurrentProvider: ( - provider: SimplifiedProvider< - "name" | "enabled" | "readonly" | "supports_completion" - > | null, - ) => void; + configuredProviders: ProviderListItem[]; + currentProvider: ProviderListItem; + handleSetCurrentProvider: (provider: ProviderListItem | null) => void; }; export const ProviderPreview: React.FC<ProviderPreviewProps> = ({ - configuredProviders, currentProvider, handleSetCurrentProvider, }) => { - const { - handleDiscardChanges, - handleSaveChanges, - handleDeleteProvider, - isDeletingProvider, - isSavingProvider, - } = useProviderPreview(handleSetCurrentProvider); + const dispatch = useAppDispatch(); + const [deleteProvider, { isLoading: isDeletingProvider }] = + useDeleteProviderMutation(); + + const showDelete = !UNDELETABLE_PROVIDERS.includes(currentProvider.name); + + const handleDeleteProvider = async (providerName: string) => { + const response = await deleteProvider(providerName); + if 
(response.error) return; + dispatch( + setInformation( + `${getProviderName( + providerName, + )}'s Provider configuration was deleted successfully`, + ), + ); + dispatch(providersApi.util.resetApiState()); + handleSetCurrentProvider(null); + }; return ( <Flex direction="column" align="start" height="100%"> @@ -42,27 +50,19 @@ export const ProviderPreview: React.FC<ProviderPreviewProps> = ({ <Heading as="h2" size="3"> {getProviderName(currentProvider)} Configuration </Heading> - <DeletePopover - itemName={getProviderName(currentProvider)} - isDisabled={currentProvider.readonly} - isDeleting={isDeletingProvider} - deleteBy={currentProvider.name} - handleDelete={(providerName: string) => - void handleDeleteProvider(providerName) - } - /> - </Flex> - <ProviderForm - currentProvider={currentProvider} - handleSaveChanges={(updatedProviderData) => - void handleSaveChanges(updatedProviderData) - } - isSaving={isSavingProvider} - isProviderConfigured={configuredProviders.some( - (p) => p.name === currentProvider.name, + {showDelete && ( + <DeletePopover + itemName={getProviderName(currentProvider)} + isDisabled={currentProvider.readonly} + isDeleting={isDeletingProvider} + deleteBy={currentProvider.name} + handleDelete={(providerName: string) => + void handleDeleteProvider(providerName) + } + /> )} - handleDiscardChanges={handleDiscardChanges} - /> + </Flex> + <ProviderForm currentProvider={currentProvider} /> </Flex> ); }; diff --git a/refact-agent/gui/src/features/Providers/ProviderPreview/useProviderPreview.ts b/refact-agent/gui/src/features/Providers/ProviderPreview/useProviderPreview.ts index 1814e85be..2974f5956 100644 --- a/refact-agent/gui/src/features/Providers/ProviderPreview/useProviderPreview.ts +++ b/refact-agent/gui/src/features/Providers/ProviderPreview/useProviderPreview.ts @@ -7,44 +7,44 @@ import { } from "../../../hooks/useProvidersQuery"; import { setInformation } from "../../Errors/informationSlice"; -import { providersApi } from "../../../services/refact"; +import { providersApi, ProviderListItem } from "../../../services/refact"; import { getProviderName } from "../getProviderName"; -import type { Provider, SimplifiedProvider } from "../../../services/refact"; +import type { ProviderFormValues } from "../ProviderForm/useProviderForm"; export function useProviderPreview( - handleSetCurrentProvider: ( - provider: SimplifiedProvider< - "name" | "enabled" | "readonly" | "supports_completion" - > | null, - ) => void, + handleSetCurrentProvider: (provider: ProviderListItem | null) => void, ) { const dispatch = useAppDispatch(); const [isSavingProvider, setIsSavingProvider] = useState(false); const [isDeletingProvider, setIsDeletingProvider] = useState(false); + const [currentProviderName, setCurrentProviderName] = useState<string>(""); - const updateProvider = useUpdateProviderMutation(); - const deleteProvider = useDeleteProviderMutation(); + const [updateProvider] = useUpdateProviderMutation(); + const [deleteProvider] = useDeleteProviderMutation(); const handleSaveChanges = useCallback( - async (updatedProviderData: Provider) => { + async (updatedProviderData: ProviderFormValues, providerName: string) => { setIsSavingProvider(true); - const response = await updateProvider(updatedProviderData); + setCurrentProviderName(providerName); + + const { enabled, readonly: _readonly, ...settings } = updatedProviderData; + const response = await updateProvider({ + providerName, + settings: { ...settings, enabled }, + }); + if (response.error) { setIsSavingProvider(false); return; } const 
actions = [ - setInformation( - `Provider ${getProviderName( - updatedProviderData, - )} updated successfully`, - ), + setInformation(`Provider ${providerName} updated successfully`), providersApi.util.invalidateTags([ "PROVIDER", - { type: "CONFIGURED_PROVIDERS", id: "LIST" }, + { type: "PROVIDERS", id: "LIST" }, ]), ]; actions.forEach((action) => dispatch(action)); @@ -90,5 +90,6 @@ export function useProviderPreview( handleSaveChanges, isSavingProvider, isDeletingProvider, + currentProviderName, }; } diff --git a/refact-agent/gui/src/features/Providers/Providers.tsx b/refact-agent/gui/src/features/Providers/Providers.tsx index 5b6b83413..98d9bf338 100644 --- a/refact-agent/gui/src/features/Providers/Providers.tsx +++ b/refact-agent/gui/src/features/Providers/Providers.tsx @@ -1,12 +1,10 @@ import React from "react"; -import { Flex, Button } from "@radix-ui/themes"; -import { ArrowLeftIcon } from "@radix-ui/react-icons"; +import { Flex } from "@radix-ui/themes"; import { ScrollArea } from "../../components/ScrollArea"; import { PageWrapper } from "../../components/PageWrapper"; import { Spinner } from "../../components/Spinner"; import { ProvidersView } from "./ProvidersView"; -import { ProviderUpdateProvider } from "./ProviderUpdateContext"; import { useGetConfiguredProvidersQuery } from "../../hooks/useProvidersQuery"; @@ -20,7 +18,6 @@ export type ProvidersProps = { export const Providers: React.FC<ProvidersProps> = ({ backFromProviders, host, - tabbed, }) => { const { data: configuredProvidersData, isSuccess } = useGetConfiguredProvidersQuery(); @@ -34,18 +31,6 @@ export const Providers: React.FC<ProvidersProps> = ({ marginTop: 0, }} > - {host === "vscode" && !tabbed ? ( - <Flex gap="2" pb="3"> - <Button variant="surface" onClick={backFromProviders}> - <ArrowLeftIcon width="16" height="16" /> - Back - </Button> - </Flex> - ) : ( - <Button mr="auto" variant="outline" onClick={backFromProviders} mb="4"> - Back - </Button> - )} <ScrollArea scrollbars="vertical" fullHeight> <Flex direction="column" @@ -56,11 +41,10 @@ export const Providers: React.FC<ProvidersProps> = ({ height: "100%", }} > - <ProviderUpdateProvider> - <ProvidersView - configuredProviders={configuredProvidersData.providers} - /> - </ProviderUpdateProvider> + <ProvidersView + configuredProviders={configuredProvidersData.providers} + backFromProviders={backFromProviders} + /> </Flex> </ScrollArea> </PageWrapper> diff --git a/refact-agent/gui/src/features/Providers/ProvidersView/ConfiguredProvidersView.tsx b/refact-agent/gui/src/features/Providers/ProvidersView/ConfiguredProvidersView.tsx index d142abe7f..74c5babb5 100644 --- a/refact-agent/gui/src/features/Providers/ProvidersView/ConfiguredProvidersView.tsx +++ b/refact-agent/gui/src/features/Providers/ProvidersView/ConfiguredProvidersView.tsx @@ -1,31 +1,21 @@ import React from "react"; -import { Button, Flex, Heading, Select, Text } from "@radix-ui/themes"; +import { Flex, Heading, Text } from "@radix-ui/themes"; import { ProviderCard } from "../ProviderCard/ProviderCard"; -import type { ConfiguredProvidersResponse } from "../../../services/refact"; -import { getProviderName } from "../getProviderName"; +import type { ProviderListItem } from "../../../services/refact"; import { useGetConfiguredProvidersView } from "./useConfiguredProvidersView"; export type ConfiguredProvidersViewProps = { - configuredProviders: ConfiguredProvidersResponse["providers"]; - handleSetCurrentProvider: ( - provider: ConfiguredProvidersResponse["providers"][number], - ) => void; + 
configuredProviders: ProviderListItem[]; + handleSetCurrentProvider: (provider: ProviderListItem) => void; }; export const ConfiguredProvidersView: React.FC< ConfiguredProvidersViewProps > = ({ configuredProviders, handleSetCurrentProvider }) => { - const { - handleAddNewProvider, - handlePotentialCurrentProvider, - notConfiguredProviderTemplates, - sortedConfiguredProviders, - potentialCurrentProvider, - } = useGetConfiguredProvidersView({ + const { sortedConfiguredProviders } = useGetConfiguredProvidersView({ configuredProviders, - handleSetCurrentProvider, }); return ( @@ -48,34 +38,6 @@ export const ConfiguredProvidersView: React.FC< /> ))} </Flex> - {notConfiguredProviderTemplates.length > 0 && ( - <Flex direction="column" gap="2"> - <Heading as="h3" size="3"> - Add new provider - </Heading> - <Select.Root - defaultValue={notConfiguredProviderTemplates[0].name} - size="2" - onValueChange={handlePotentialCurrentProvider} - > - <Select.Trigger /> - <Select.Content variant="solid" position="popper"> - {notConfiguredProviderTemplates.map((provider) => { - return ( - <Select.Item key={provider.name} value={provider.name}> - {getProviderName(provider)} - </Select.Item> - ); - })} - </Select.Content> - </Select.Root> - {potentialCurrentProvider && ( - <Button variant="outline" onClick={handleAddNewProvider}> - Configure {getProviderName(potentialCurrentProvider)} - </Button> - )} - </Flex> - )} </Flex> ); }; diff --git a/refact-agent/gui/src/features/Providers/ProvidersView/ProvidersView.tsx b/refact-agent/gui/src/features/Providers/ProvidersView/ProvidersView.tsx index ec24f3fc7..011128f77 100644 --- a/refact-agent/gui/src/features/Providers/ProvidersView/ProvidersView.tsx +++ b/refact-agent/gui/src/features/Providers/ProvidersView/ProvidersView.tsx @@ -1,12 +1,10 @@ import React, { useCallback, useState } from "react"; -import { Flex } from "@radix-ui/themes"; +import { Button, Flex } from "@radix-ui/themes"; +import { ArrowLeftIcon } from "@radix-ui/react-icons"; import { ConfiguredProvidersView } from "./ConfiguredProvidersView"; -import type { - ConfiguredProvidersResponse, - SimplifiedProvider, -} from "../../../services/refact"; +import type { ProviderListItem } from "../../../services/refact"; import { ProviderPreview } from "../ProviderPreview"; import { ErrorCallout, @@ -24,11 +22,13 @@ import styles from "./ProvidersView.module.css"; import { selectConfig } from "../../Config/configSlice"; export type ProvidersViewProps = { - configuredProviders: ConfiguredProvidersResponse["providers"]; + configuredProviders: ProviderListItem[]; + backFromProviders: () => void; }; export const ProvidersView: React.FC<ProvidersViewProps> = ({ configuredProviders, + backFromProviders, }) => { const dispatch = useAppDispatch(); @@ -36,22 +36,37 @@ export const ProvidersView: React.FC<ProvidersViewProps> = ({ const globalError = useAppSelector(getErrorMessage); const information = useAppSelector(getInformationMessage); - const [currentProvider, setCurrentProvider] = useState<SimplifiedProvider< - "name" | "enabled" | "readonly" | "supports_completion" - > | null>(null); + const [currentProvider, setCurrentProvider] = + useState<ProviderListItem | null>(null); const handleSetCurrentProvider = useCallback( - ( - provider: SimplifiedProvider< - "name" | "enabled" | "readonly" | "supports_completion" - > | null, - ) => { + (provider: ProviderListItem | null) => { setCurrentProvider(provider); }, [], ); + const handleBackClick = useCallback(() => { + if (currentProvider) { + setCurrentProvider(null); + } 
else { + backFromProviders(); + } + }, [currentProvider, backFromProviders]); + return ( <Flex px="1" direction="column" height="100%" width="100%"> + {currentHost === "vscode" ? ( + <Flex gap="2" pb="3"> + <Button variant="surface" onClick={handleBackClick}> + <ArrowLeftIcon width="16" height="16" /> + Back + </Button> + </Flex> + ) : ( + <Button mr="auto" variant="outline" onClick={handleBackClick} mb="4"> + Back + </Button> + )} {!currentProvider && ( <ConfiguredProvidersView configuredProviders={configuredProviders} diff --git a/refact-agent/gui/src/features/Providers/ProvidersView/useConfiguredProvidersView.tsx b/refact-agent/gui/src/features/Providers/ProvidersView/useConfiguredProvidersView.tsx index ade797242..f9082ad6d 100644 --- a/refact-agent/gui/src/features/Providers/ProvidersView/useConfiguredProvidersView.tsx +++ b/refact-agent/gui/src/features/Providers/ProvidersView/useConfiguredProvidersView.tsx @@ -1,33 +1,11 @@ -import { useCallback, useEffect, useMemo, useState } from "react"; -import type { SimplifiedProvider } from "../../../services/refact"; -import { useGetProviderTemplatesQuery } from "../../../hooks/useProvidersQuery"; -import { ConfiguredProvidersViewProps } from "./ConfiguredProvidersView"; +import { useMemo } from "react"; +import type { ProviderListItem } from "../../../services/refact"; export function useGetConfiguredProvidersView({ configuredProviders, - handleSetCurrentProvider, }: { - configuredProviders: ConfiguredProvidersViewProps["configuredProviders"]; - handleSetCurrentProvider: ConfiguredProvidersViewProps["handleSetCurrentProvider"]; + configuredProviders: ProviderListItem[]; }) { - const { data: providerTemplatesData } = useGetProviderTemplatesQuery(); - - const notConfiguredProviderTemplates = useMemo(() => { - return providerTemplatesData - ? 
providerTemplatesData.provider_templates.reduce< - SimplifiedProvider<"name">[] - >((acc, provider) => { - if (!configuredProviders.some((p) => p.name === provider.name)) - acc.push(provider); - return acc; - }, []) - : []; - }, [configuredProviders, providerTemplatesData]); - - const [potentialCurrentProvider, setPotentialCurrentProvider] = useState< - SimplifiedProvider<"name"> | undefined - >(notConfiguredProviderTemplates[0] || undefined); - const sortedConfiguredProviders = useMemo(() => { return [...configuredProviders].sort((a, b) => { const getPriority = (provider: { name: string }) => { @@ -51,34 +29,7 @@ export function useGetConfiguredProvidersView({ }); }, [configuredProviders]); - const handlePotentialCurrentProvider = useCallback((value: string) => { - setPotentialCurrentProvider({ - name: value, - }); - }, []); - - const handleAddNewProvider = useCallback(() => { - if (!potentialCurrentProvider) return; - - handleSetCurrentProvider({ - name: potentialCurrentProvider.name, - enabled: true, - readonly: false, - supports_completion: false, - }); - }, [handleSetCurrentProvider, potentialCurrentProvider]); - - useEffect(() => { - if (notConfiguredProviderTemplates.length > 0) { - setPotentialCurrentProvider(notConfiguredProviderTemplates[0]); - } - }, [notConfiguredProviderTemplates]); - return { - handlePotentialCurrentProvider, - handleAddNewProvider, sortedConfiguredProviders, - notConfiguredProviderTemplates, - potentialCurrentProvider, }; } diff --git a/refact-agent/gui/src/features/Providers/constants.ts b/refact-agent/gui/src/features/Providers/constants.ts index 1ac6e76b1..e38b0526d 100644 --- a/refact-agent/gui/src/features/Providers/constants.ts +++ b/refact-agent/gui/src/features/Providers/constants.ts @@ -2,13 +2,18 @@ export const BEAUTIFUL_PROVIDER_NAMES: Record<string, string> = { refact: "Refact Cloud", refact_self_hosted: "Refact Self-Hosted", openai: "OpenAI", + openai_responses: "OpenAI (Responses API)", + openai_codex: "OpenAI Codex", openrouter: "OpenRouter", - groq: "Groq", // not sure about this one + groq: "Groq", anthropic: "Anthropic", + claude_code: "Claude Code", deepseek: "DeepSeek", google_gemini: "Google Gemini", ollama: "Ollama", lmstudio: "LM Studio", + vllm: "vLLM", xai: "xAI", + xai_responses: "xAI (Responses API)", custom: "Custom Provider", }; diff --git a/refact-agent/gui/src/features/Providers/getProviderName.ts b/refact-agent/gui/src/features/Providers/getProviderName.ts index a299e27fd..950af16ea 100644 --- a/refact-agent/gui/src/features/Providers/getProviderName.ts +++ b/refact-agent/gui/src/features/Providers/getProviderName.ts @@ -1,10 +1,9 @@ -import type { SimplifiedProvider } from "../../services/refact"; import { BEAUTIFUL_PROVIDER_NAMES } from "./constants"; -export function getProviderName(provider: SimplifiedProvider | string): string { +export function getProviderName(provider: { name: string } | string): string { if (typeof provider === "string") return BEAUTIFUL_PROVIDER_NAMES[provider]; const maybeName = provider.name; - if (!maybeName) return "Unknown Provider"; // TODO: throw error or think through it more + if (!maybeName) return "Unknown Provider"; const beautyName = BEAUTIFUL_PROVIDER_NAMES[maybeName] as string | undefined; return beautyName ? 
beautyName : maybeName;
}
diff --git a/refact-agent/gui/src/features/Providers/icons/Vllm.tsx b/refact-agent/gui/src/features/Providers/icons/Vllm.tsx
new file mode 100644
index 000000000..ad69e98ea
--- /dev/null
+++ b/refact-agent/gui/src/features/Providers/icons/Vllm.tsx
@@ -0,0 +1,37 @@
+import { FC, SVGProps } from "react";
+
+export const VllmIcon: FC<SVGProps<SVGSVGElement>> = (props) => {
+  return (
+    <svg
+      width="30"
+      height="30"
+      viewBox="0 0 24 24"
+      fill="none"
+      xmlns="http://www.w3.org/2000/svg"
+      {...props}
+    >
+      <rect
+        x="3"
+        y="3"
+        width="18"
+        height="18"
+        rx="4"
+        fill="currentColor"
+        opacity="0.12"
+      />
+      <path
+        d="M7.5 9.2L10.5 15H13.5L16.5 9.2"
+        stroke="currentColor"
+        strokeWidth="1.6"
+        strokeLinecap="round"
+        strokeLinejoin="round"
+      />
+      <path
+        d="M7.5 15H16.5"
+        stroke="currentColor"
+        strokeWidth="1.6"
+        strokeLinecap="round"
+      />
+    </svg>
+  );
+};
diff --git a/refact-agent/gui/src/features/Providers/icons/iconsMap.tsx b/refact-agent/gui/src/features/Providers/icons/iconsMap.tsx
index 922703e57..fa31dd63b 100644
--- a/refact-agent/gui/src/features/Providers/icons/iconsMap.tsx
+++ b/refact-agent/gui/src/features/Providers/icons/iconsMap.tsx
@@ -8,19 +8,25 @@ import { OllamaIcon } from "./Ollama";
 import { OpenAIIcon } from "./OpenAI";
 import { OpenRouterIcon } from "./OpenRouter";
 import { RefactIcon } from "./Refact";
+import { VllmIcon } from "./Vllm";
 import { XaiIcon } from "./Xai";
 
 export const iconsMap: Record<string, JSX.Element> = {
   refact: <RefactIcon />,
   refact_self_hosted: <RefactIcon />,
   openai: <OpenAIIcon />,
+  openai_responses: <OpenAIIcon />,
+  openai_codex: <OpenAIIcon />,
   anthropic: <AnthropicIcon />,
+  claude_code: <AnthropicIcon />,
   google_gemini: <GeminiIcon />,
   openrouter: <OpenRouterIcon />,
   deepseek: <DeepSeekIcon />,
   groq: <GroqIcon />,
   ollama: <OllamaIcon />,
   lmstudio: <LMStudioIcon />,
+  vllm: <VllmIcon />,
   xai: <XaiIcon />,
+  xai_responses: <XaiIcon />,
   custom: <CustomIcon />,
 };
diff --git a/refact-agent/gui/src/features/Providers/useUpdateProvider.ts b/refact-agent/gui/src/features/Providers/useUpdateProvider.ts
index 52d42b141..5f23afbdc 100644
--- a/refact-agent/gui/src/features/Providers/useUpdateProvider.ts
+++ b/refact-agent/gui/src/features/Providers/useUpdateProvider.ts
@@ -20,8 +20,6 @@
   const [getProviderData] = providersApi.useLazyGetProviderQuery();
   const [saveProviderData] = providersApi.useUpdateProviderMutation();
 
-  // Use the provider name as the key to track state
-  // then get updating state from context
   const providerKey = provider.name;
   const isUpdatingEnabledState = updatingProviders[providerKey] || false;
 
@@ -37,11 +35,14 @@
       return;
     }
 
-    const enabled = providerData.enabled;
+    const newSettings = {
+      ...providerData.settings,
+      enabled: !providerData.enabled,
+    };
 
     const response = await saveProviderData({
-      ...providerData,
-      enabled: !enabled,
+      providerName: provider.name,
+      settings: newSettings,
     });
 
     if (response.error) {
@@ -57,9 +58,7 @@
     }
 
     dispatch(
-      providersApi.util.invalidateTags([
-        { type: "CONFIGURED_PROVIDERS", id: "LIST" },
-      ]),
+      providersApi.util.invalidateTags([{ type: "PROVIDERS", id: "LIST" }]),
     );
     setTimeout(() => {
       setProviderUpdating(providerKey, false);
diff --git a/refact-agent/gui/src/features/Tasks/AgentStatusDot.tsx b/refact-agent/gui/src/features/Tasks/AgentStatusDot.tsx
new file mode 100644
index 000000000..edd5944fd
--- /dev/null
+++ 
b/refact-agent/gui/src/features/Tasks/AgentStatusDot.tsx
@@ -0,0 +1,23 @@
+import React from "react";
+import styles from "./Tasks.module.css";
+
+interface AgentStatusDotProps {
+  status: "doing" | "done" | "failed";
+  size?: "small" | "medium";
+}
+
+export const AgentStatusDot: React.FC<AgentStatusDotProps> = ({
+  status,
+  size = "medium",
+}) => {
+  const sizeClass =
+    size === "small" ? styles.agentDotSmall : styles.agentDotMedium;
+  const statusClass =
+    status === "doing"
+      ? styles.agentDotDoing
+      : status === "done"
+        ? styles.agentDotDone
+        : styles.agentDotFailed;
+
+  return <div className={`${sizeClass} ${statusClass}`} />;
+};
diff --git a/refact-agent/gui/src/features/Tasks/KanbanBoard.tsx b/refact-agent/gui/src/features/Tasks/KanbanBoard.tsx
new file mode 100644
index 000000000..2c99fe762
--- /dev/null
+++ b/refact-agent/gui/src/features/Tasks/KanbanBoard.tsx
@@ -0,0 +1,152 @@
+import React, { useCallback } from "react";
+import {
+  Flex,
+  Box,
+  Text,
+  Card,
+  Badge,
+  Heading,
+  Tooltip,
+} from "@radix-ui/themes";
+import type {
+  TaskBoard,
+  BoardCard,
+  BoardColumn,
+} from "../../services/refact/tasks";
+import styles from "./Tasks.module.css";
+
+const getPriorityColor = (priority: string): "red" | "orange" | "gray" => {
+  if (priority === "P0") return "red";
+  if (priority === "P1") return "orange";
+  return "gray";
+};
+
+const columnColors: Record<string, string> = {
+  planned: "var(--gray-5)",
+  doing: "var(--blue-5)",
+  done: "var(--green-5)",
+  failed: "var(--red-5)",
+};
+
+interface KanbanCardProps {
+  card: BoardCard;
+  onClick?: (card: BoardCard) => void;
+}
+
+const KanbanCard: React.FC<KanbanCardProps> = ({ card, onClick }) => {
+  const handleClick = useCallback(() => {
+    onClick?.(card);
+  }, [card, onClick]);
+
+  const hasAgent = card.assignee !== null;
+  const hasDeps = card.depends_on.length > 0;
+
+  return (
+    <Card
+      className={styles.kanbanCard}
+      onClick={handleClick}
+      style={{ cursor: onClick ? "pointer" : "default" }}
+    >
+      <Flex direction="column" gap="1">
+        <Flex justify="between" align="start">
+          <Text size="2" weight="medium" style={{ flex: 1 }}>
+            {card.title}
+          </Text>
+          <Badge color={getPriorityColor(card.priority)} size="1">
+            {card.priority}
+          </Badge>
+        </Flex>
+
+        <Flex gap="1" wrap="wrap">
+          {hasAgent && (
+            <Tooltip content={`Agent: ${card.assignee}`}>
+              <Badge size="1" color="blue" variant="soft">
+                🤖 Agent
+              </Badge>
+            </Tooltip>
+          )}
+          {hasDeps && (
+            <Tooltip content={`Depends on: ${card.depends_on.join(", ")}`}>
+              <Badge size="1" color="gray" variant="soft">
+                ⛓️ {card.depends_on.length}
+              </Badge>
+            </Tooltip>
+          )}
+          {card.status_updates.length > 0 && (
+            <Badge size="1" color="gray" variant="soft">
+              📝 {card.status_updates.length}
+            </Badge>
+          )}
+        </Flex>
+      </Flex>
+    </Card>
+  );
+};
+
+interface KanbanColumnProps {
+  column: BoardColumn;
+  cards: BoardCard[];
+  onCardClick?: (card: BoardCard) => void;
+}
+
+const KanbanColumn: React.FC<KanbanColumnProps> = ({
+  column,
+  cards,
+  onCardClick,
+}) => {
+  return (
+    <Flex
+      direction="column"
+      className={styles.kanbanColumn}
+      style={{ borderTopColor: columnColors[column.id] || "var(--gray-5)" }}
+    >
+      <Flex
+        justify="between"
+        align="center"
+        className={styles.kanbanColumnHeader}
+      >
+        <Heading size="1">{column.title}</Heading>
+        <Badge size="1" color="gray">
+          {cards.length}
+        </Badge>
+      </Flex>
+      <Box className={styles.kanbanColumnContent}>
+        <Flex direction="column" gap="1">
+          {cards.map((card) => (
+            <KanbanCard key={card.id} card={card} onClick={onCardClick} />
+          ))}
+        </Flex>
+      </Box>
+    </Flex>
+  );
+};
+
+interface KanbanBoardProps {
+  board: TaskBoard;
+  onCardClick?: (card: BoardCard) => void;
+}
+
+export const KanbanBoard: React.FC<KanbanBoardProps> = ({
+  board,
+  onCardClick,
+}) => {
+  const getCardsForColumn = useCallback(
+    (columnId: string): BoardCard[] => {
+      return board.cards.filter((card) => card.column === columnId);
+    },
+    [board.cards],
+  );
+
+  return (
+    <Flex className={styles.kanbanBoard}>
+      {board.columns.map((column) => (
+        <KanbanColumn
+          key={column.id}
+          column={column}
+          cards={getCardsForColumn(column.id)}
+          onCardClick={onCardClick}
+        />
+      ))}
+    </Flex>
+  );
+};
diff --git a/refact-agent/gui/src/features/Tasks/TaskList.tsx b/refact-agent/gui/src/features/Tasks/TaskList.tsx
new file mode 100644
index 000000000..c9549f65b
--- /dev/null
+++ b/refact-agent/gui/src/features/Tasks/TaskList.tsx
@@ -0,0 +1,326 @@
+import React, { useCallback, useState } from "react";
+import {
+  Flex,
+  Box,
+  Text,
+  Button,
+  Card,
+  Badge,
+  TextField,
+  Heading,
+  Spinner,
+} from "@radix-ui/themes";
+import {
+  PlusIcon,
+  DotFilledIcon,
+  CheckCircledIcon,
+  CrossCircledIcon,
+  LayersIcon,
+  PauseIcon,
+} from "@radix-ui/react-icons";
+import { ChatLoading } from "../../components/ChatContent/ChatLoading";
+import { ScrollArea } from "../../components/ScrollArea";
+import { CloseButton } from "../../components/Buttons/Buttons";
+import { useAppDispatch } from "../../hooks";
+import { push } from "../Pages/pagesSlice";
+import {
+  useListTasksQuery,
+  useCreateTaskMutation,
+  useDeleteTaskMutation,
+  TaskMeta,
+} from "../../services/refact/tasks";
+import { openTask } from "./tasksSlice";
+
+const statusColors: Record<
+  TaskMeta["status"],
+  "gray" | "blue" | "yellow" | "green" | "red"
+> = {
+  planning: "gray",
+  active: "blue",
+  paused: "yellow",
+  completed: "green",
+  abandoned: "red",
+};
+
+const statusLabels: Record<TaskMeta["status"], string> = {
+  planning: "Planning",
+
active: "Active", + paused: "Paused", + completed: "Done", + abandoned: "Abandoned", +}; + +interface TaskItemProps { + task: TaskMeta; + onClick: () => void; + onDelete: () => void; +} + +const TaskItem: React.FC<TaskItemProps> = ({ task, onClick, onDelete }) => { + const dateUpdated = new Date(task.updated_at); + const dateTimeString = dateUpdated.toLocaleString(); + const plannerState = task.planner_session_state; + const isPlannerWorking = + plannerState === "generating" || plannerState === "executing_tools"; + const isPlannerPaused = + plannerState === "paused" || plannerState === "waiting_ide"; + const isPlannerError = plannerState === "error"; + const isCompleted = task.status === "completed"; + const isFailed = task.status === "abandoned"; + + return ( + <Box style={{ position: "relative", width: "100%" }}> + <Card + style={{ width: "100%", marginBottom: "2px" }} + variant="surface" + className="rt-Button" + asChild + role="button" + > + <button + onClick={(event) => { + event.preventDefault(); + event.stopPropagation(); + onClick(); + }} + > + <Flex gap="1" align="center"> + {isPlannerWorking && ( + <Spinner style={{ minWidth: 16, minHeight: 16 }} /> + )} + {!isPlannerWorking && isPlannerPaused && ( + <PauseIcon + style={{ + minWidth: 16, + minHeight: 16, + color: "var(--yellow-9)", + }} + /> + )} + {!isPlannerWorking && !isPlannerPaused && isPlannerError && ( + <CrossCircledIcon + style={{ minWidth: 16, minHeight: 16, color: "var(--red-9)" }} + /> + )} + {!isPlannerWorking && + !isPlannerPaused && + !isPlannerError && + isCompleted && ( + <CheckCircledIcon + style={{ + minWidth: 16, + minHeight: 16, + color: "var(--green-9)", + }} + /> + )} + {!isPlannerWorking && + !isPlannerPaused && + !isPlannerError && + isFailed && ( + <CrossCircledIcon + style={{ minWidth: 16, minHeight: 16, color: "var(--red-9)" }} + /> + )} + {!isPlannerWorking && + !isPlannerPaused && + !isPlannerError && + !isCompleted && + !isFailed && ( + <DotFilledIcon + style={{ + minWidth: 16, + minHeight: 16, + color: "var(--gray-9)", + }} + /> + )} + <Text + as="div" + size="2" + weight="bold" + style={{ + textOverflow: "ellipsis", + overflow: "hidden", + whiteSpace: "nowrap", + }} + > + {task.name} + </Text> + <Badge color={statusColors[task.status]} size="1" ml="2"> + {statusLabels[task.status]} + </Badge> + </Flex> + + <Flex justify="between" mt="8px"> + <Flex gap="4"> + <Text + size="1" + style={{ display: "flex", gap: "4px", alignItems: "center" }} + > + <LayersIcon /> {task.cards_done}/{task.cards_total} + {task.cards_failed > 0 && ( + <Text size="1" color="red"> + ({task.cards_failed} failed) + </Text> + )} + </Text> + {task.agents_active > 0 && ( + <Text + size="1" + color="blue" + style={{ display: "flex", gap: "4px", alignItems: "center" }} + > + <Spinner style={{ width: 12, height: 12 }} />{" "} + {task.agents_active} agent{task.agents_active > 1 ? 
"s" : ""} + </Text> + )} + </Flex> + <Text size="1" color="gray"> + {dateTimeString} + </Text> + </Flex> + </button> + </Card> + + <Flex + position="absolute" + top="6px" + right="6px" + gap="1" + justify="end" + align="center" + > + <CloseButton + size="1" + onClick={(event) => { + event.preventDefault(); + event.stopPropagation(); + onDelete(); + }} + iconSize={10} + title="delete task" + /> + </Flex> + </Box> + ); +}; + +export const TaskList: React.FC = () => { + const dispatch = useAppDispatch(); + const { data: tasks = [], isLoading } = useListTasksQuery(undefined, { + pollingInterval: 0, + }); + const [createTask] = useCreateTaskMutation(); + const [deleteTask] = useDeleteTaskMutation(); + const [newTaskName, setNewTaskName] = useState(""); + const [isCreating, setIsCreating] = useState(false); + + const handleCreateTask = useCallback(() => { + if (!newTaskName.trim()) return; + createTask({ name: newTaskName.trim() }) + .unwrap() + .then((task) => { + setNewTaskName(""); + setIsCreating(false); + dispatch(openTask({ id: task.id, name: task.name })); + dispatch(push({ name: "task workspace", taskId: task.id })); + }) + .catch(() => { + // Error handling via RTK Query + }); + }, [createTask, dispatch, newTaskName]); + + const handleTaskClick = useCallback( + (task: TaskMeta) => { + dispatch(openTask({ id: task.id, name: task.name })); + dispatch(push({ name: "task workspace", taskId: task.id })); + }, + [dispatch], + ); + + const handleDeleteTask = useCallback( + (taskId: string) => { + void deleteTask(taskId); + }, + [deleteTask], + ); + + const handleKeyDown = useCallback( + (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + handleCreateTask(); + } else if (e.key === "Escape") { + setIsCreating(false); + setNewTaskName(""); + } + }, + [handleCreateTask], + ); + + if (isLoading) { + return <ChatLoading />; + } + + return ( + <Flex direction="column" style={{ height: "100%" }} p="4" gap="4"> + <Flex justify="between" align="center"> + <Heading size="4">Tasks</Heading> + {!isCreating && ( + <Button size="2" onClick={() => setIsCreating(true)}> + <PlusIcon /> New Task + </Button> + )} + </Flex> + + {isCreating && ( + <Card> + <Flex gap="2"> + <TextField.Root + style={{ flex: 1 }} + placeholder="Task name..." + value={newTaskName} + onChange={(e) => setNewTaskName(e.target.value)} + onKeyDown={handleKeyDown} + autoFocus + /> + <Button onClick={handleCreateTask} disabled={!newTaskName.trim()}> + Create + </Button> + <Button + variant="soft" + color="gray" + onClick={() => { + setIsCreating(false); + setNewTaskName(""); + }} + > + Cancel + </Button> + </Flex> + </Card> + )} + + <Box style={{ flex: 1, overflow: "hidden" }}> + <ScrollArea scrollbars="vertical"> + <Flex direction="column" gap="2"> + {tasks.length === 0 ? ( + <Text color="gray" size="2"> + No tasks yet. Create one to start planning. 
+ </Text> + ) : ( + tasks.map((task) => ( + <TaskItem + key={task.id} + task={task} + onClick={() => handleTaskClick(task)} + onDelete={() => handleDeleteTask(task.id)} + /> + )) + )} + </Flex> + </ScrollArea> + </Box> + </Flex> + ); +}; diff --git a/refact-agent/gui/src/features/Tasks/TaskWorkspace.tsx b/refact-agent/gui/src/features/Tasks/TaskWorkspace.tsx new file mode 100644 index 000000000..1a1b08b4a --- /dev/null +++ b/refact-agent/gui/src/features/Tasks/TaskWorkspace.tsx @@ -0,0 +1,847 @@ +import React, { useCallback, useState, useEffect, useMemo } from "react"; +import { + Flex, + Box, + Text, + Button, + Heading, + Badge, + Card, +} from "@radix-ui/themes"; +import { + ArrowLeftIcon, + PlusIcon, + PersonIcon, + Cross2Icon, + ChevronDownIcon, +} from "@radix-ui/react-icons"; +import { AgentStatusDot } from "./AgentStatusDot"; +import { ScrollArea } from "../../components/ScrollArea"; +import { ChatLoading } from "../../components/ChatContent/ChatLoading"; +import { useAppDispatch, useAppSelector } from "../../hooks"; +import { pop } from "../Pages/pagesSlice"; +import { KanbanBoard } from "./KanbanBoard"; +import { + useGetTaskQuery, + useGetBoardQuery, + useListTaskTrajectoriesQuery, + useUpdateTaskMetaMutation, + useCreatePlannerChatMutation, + BoardCard, +} from "../../services/refact/tasks"; +import { ModelSelector } from "../../components/Chat/ModelSelector"; +import styles from "./Tasks.module.css"; +import { Chat } from "../Chat"; +import { selectConfig } from "../Config/configSlice"; +import { createChatWithId, switchToThread } from "../Chat/Thread"; +import { + openTask, + addPlannerChat, + removePlannerChat, + selectOpenTasksFromRoot, + setTaskActiveChat, + selectTaskActiveChat, + PlannerInfo, +} from "./tasksSlice"; +import { selectThreadById } from "../Chat/Thread"; +import { InternalLinkProvider } from "../../contexts/InternalLinkContext"; +import { parseRefactLink } from "../../contexts/internalLinkUtils"; + +type ActiveChat = + | { type: "planner"; chatId: string } + | { type: "agent"; cardId: string; chatId: string } + | null; + +interface PlannerPanelProps { + plannerChats: PlannerInfo[]; + activeChat: ActiveChat; + activePlannerId: string | null; + onNewPlanner: () => void; + onSelectPlanner: (chatId: string) => void; + onRemovePlanner: (chatId: string) => void; +} + +interface PlannerItemProps { + planner: PlannerInfo; + isSelected: boolean; + isActive: boolean; + onSelect: () => void; + onRemove: () => void; +} + +function formatPlannerDate(dateStr: string): string { + if (!dateStr) return ""; + try { + const date = new Date(dateStr); + return date.toLocaleDateString(undefined, { + month: "short", + day: "numeric", + hour: "2-digit", + minute: "2-digit", + }); + } catch { + return ""; + } +} + +const PlannerItem: React.FC<PlannerItemProps> = ({ + planner, + isSelected, + isActive, + onSelect, + onRemove, +}) => { + const thread = useAppSelector((state) => selectThreadById(state, planner.id)); + const title = thread?.title ?? planner.title; + const hasGeneratedTitle = + title && title !== "New Chat" && title.trim() !== ""; + const displayTitle = hasGeneratedTitle + ? title + : formatPlannerDate(planner.createdAt); + + return ( + <Box + className={styles.panelItem} + onClick={onSelect} + style={{ background: isSelected ? 
"var(--accent-4)" : undefined }} + > + <Flex align="center" gap="1"> + {isActive && ( + <Badge size="1" color="green" radius="full"> + ● + </Badge> + )} + <Badge size="1" color="violet"> + 📋 + </Badge> + </Flex> + <Text + size="1" + style={{ + flex: 1, + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + }} + > + {displayTitle} + </Text> + <Button + size="1" + variant="ghost" + color="gray" + onClick={(e) => { + e.stopPropagation(); + onRemove(); + }} + > + <Cross2Icon /> + </Button> + </Box> + ); +}; + +const PlannerPanel: React.FC<PlannerPanelProps> = ({ + plannerChats, + activeChat, + activePlannerId, + onNewPlanner, + onSelectPlanner, + onRemovePlanner, +}) => { + return ( + <Box className={styles.panel}> + <Flex className={styles.panelHeader}> + <Text size="2" weight="medium"> + Planners + </Text> + <Button size="1" variant="ghost" onClick={onNewPlanner}> + <PlusIcon /> + </Button> + </Flex> + <Box className={styles.panelContent}> + {plannerChats.length === 0 ? ( + <Flex align="center" justify="center" style={{ flex: 1 }}> + <Text size="1" color="gray"> + No planner chats yet + </Text> + </Flex> + ) : ( + <ScrollArea scrollbars="vertical"> + <Flex direction="column" gap="1"> + {plannerChats.map((planner) => ( + <PlannerItem + key={planner.id} + planner={planner} + isSelected={ + activeChat?.type === "planner" && + activeChat.chatId === planner.id + } + isActive={planner.id === activePlannerId} + onSelect={() => onSelectPlanner(planner.id)} + onRemove={() => onRemovePlanner(planner.id)} + /> + ))} + </Flex> + </ScrollArea> + )} + </Box> + </Box> + ); +}; + +interface AgentsPanelProps { + cards: BoardCard[]; + activeChat: ActiveChat; + onSelectAgent: (cardId: string, chatId: string) => void; + defaultAgentModel?: string; + onModelChange?: (model: string) => void; +} + +const AgentsPanel: React.FC<AgentsPanelProps> = ({ + cards, + activeChat, + onSelectAgent, + defaultAgentModel, + onModelChange, +}) => { + const activeAgents = cards.filter( + (c) => c.column === "doing" && c.agent_chat_id, + ); + const completedAgents = cards.filter( + (c) => c.column === "done" && c.agent_chat_id, + ); + const failedAgents = cards.filter( + (c) => c.column === "failed" && c.agent_chat_id, + ); + + const total = + completedAgents.length + failedAgents.length + activeAgents.length; + const done = completedAgents.length; + + const renderAgentItem = ( + card: BoardCard, + status: "doing" | "done" | "failed", + ) => { + const isActive = + activeChat?.type === "agent" && activeChat.cardId === card.id; + return ( + <Box + key={card.id} + className={styles.panelItem} + onClick={() => + card.agent_chat_id && onSelectAgent(card.id, card.agent_chat_id) + } + style={{ background: isActive ? "var(--accent-4)" : undefined }} + > + <AgentStatusDot status={status} size="medium" /> + <Text size="1" style={{ flex: 1 }}> + {card.title} + </Text> + </Box> + ); + }; + + return ( + <Box className={styles.panel}> + <Flex className={styles.panelHeader}> + <Text size="2" weight="medium"> + Agents + </Text> + {total > 0 && ( + <Badge size="1" color="gray"> + {done}/{total} done + </Badge> + )} + </Flex> + <Box className={styles.panelContent}> + {activeAgents.length === 0 && + completedAgents.length === 0 && + failedAgents.length === 0 ? 
( + <Flex align="center" justify="center" style={{ flex: 1 }}> + <Text size="1" color="gray"> + No agents yet + </Text> + </Flex> + ) : ( + <ScrollArea scrollbars="vertical"> + <Flex direction="column" gap="1"> + {activeAgents.map((card) => renderAgentItem(card, "doing"))} + {completedAgents.map((card) => renderAgentItem(card, "done"))} + {failedAgents.map((card) => renderAgentItem(card, "failed"))} + </Flex> + </ScrollArea> + )} + </Box> + <Flex + p="2" + align="center" + justify="between" + gap="2" + style={{ borderTop: "1px solid var(--gray-4)" }} + > + <Text size="1" color="gray"> + Agent model + </Text> + {onModelChange && ( + <ModelSelector + value={defaultAgentModel} + onValueChange={onModelChange} + showLabel={false} + /> + )} + </Flex> + </Box> + ); +}; + +interface CardDetailProps { + card: BoardCard; + onClose: () => void; +} + +const CardDetail: React.FC<CardDetailProps> = ({ card, onClose }) => { + return ( + <Box className={styles.cardDetailOverlay} onClick={onClose}> + <Card className={styles.cardDetail} onClick={(e) => e.stopPropagation()}> + <Flex direction="column" gap="3"> + <Flex justify="between" align="center"> + <Heading size="3">{card.title}</Heading> + <Badge + color={ + card.column === "done" + ? "green" + : card.column === "failed" + ? "red" + : "blue" + } + > + {card.column} + </Badge> + </Flex> + + {card.depends_on.length > 0 && ( + <Box> + <Text size="2" weight="medium" color="gray"> + Dependencies + </Text> + <Flex gap="1" mt="1"> + {card.depends_on.map((dep) => ( + <Badge key={dep} size="1" variant="soft"> + {dep} + </Badge> + ))} + </Flex> + </Box> + )} + + {card.instructions && ( + <Box> + <Text size="2" weight="medium" color="gray"> + Instructions + </Text> + <Box + p="2" + mt="1" + style={{ + background: "var(--gray-2)", + borderRadius: "var(--radius-2)", + whiteSpace: "pre-wrap", + }} + > + <Text size="2">{card.instructions}</Text> + </Box> + </Box> + )} + + {card.final_report && ( + <Box> + <Text size="2" weight="medium" color="gray"> + Final Report + </Text> + <Box + p="2" + mt="1" + style={{ + background: "var(--green-2)", + borderRadius: "var(--radius-2)", + whiteSpace: "pre-wrap", + }} + > + <Text size="2">{card.final_report}</Text> + </Box> + </Box> + )} + + {card.status_updates.length > 0 && ( + <Box> + <Text size="2" weight="medium" color="gray"> + Updates + </Text> + <Flex direction="column" gap="1" mt="1"> + {card.status_updates.map((update, i) => ( + <Text key={i} size="1" color="gray"> + {new Date(update.timestamp).toLocaleString()}:{" "} + {update.message} + </Text> + ))} + </Flex> + </Box> + )} + + <Flex justify="end"> + <Button variant="soft" onClick={onClose}> + Close + </Button> + </Flex> + </Flex> + </Card> + </Box> + ); +}; + +interface TaskWorkspaceProps { + taskId: string; +} + +export const TaskWorkspace: React.FC<TaskWorkspaceProps> = ({ taskId }) => { + const dispatch = useAppDispatch(); + const config = useAppSelector(selectConfig); + const { data: task, isLoading: taskLoading } = useGetTaskQuery(taskId, { + pollingInterval: 0, + }); + const { data: board, isLoading: boardLoading } = useGetBoardQuery(taskId, { + pollingInterval: 0, + }); + const { data: savedPlanners } = useListTaskTrajectoriesQuery({ + taskId, + role: "planner", + }); + const [updateTaskMeta] = useUpdateTaskMetaMutation(); + const [createPlannerChat, { isLoading: isCreatingPlanner }] = + useCreatePlannerChatMutation(); + const openTasks = useAppSelector(selectOpenTasksFromRoot); + const currentTaskUI = openTasks.find((t) => t.id === taskId); + const 
plannerChats = useMemo( + () => currentTaskUI?.plannerChats ?? [], + [currentTaskUI?.plannerChats], + ); + const activePlannerId = useMemo(() => { + if (plannerChats.length === 0) return null; + return plannerChats.reduce((latest, p) => + p.updatedAt > latest.updatedAt ? p : latest, + ).id; + }, [plannerChats]); + const activeChat = useAppSelector((state) => + selectTaskActiveChat(state, taskId), + ); + const [selectedCard, setSelectedCard] = useState<BoardCard | null>(null); + const [notification, setNotification] = useState<string | null>(null); + const [chatExpanded, setChatExpanded] = useState(false); + const plannersRestoredRef = React.useRef(false); + const prevTaskStatusRef = React.useRef<string | undefined>(undefined); + + useEffect(() => { + if (task) { + dispatch(openTask({ id: taskId, name: task.name })); + } + }, [dispatch, taskId, task]); + + useEffect(() => { + if (!savedPlanners || plannersRestoredRef.current) return; + plannersRestoredRef.current = true; + + for (const traj of savedPlanners) { + if (plannerChats.some((p) => p.id === traj.id)) continue; + + dispatch( + createChatWithId({ + id: traj.id, + title: traj.title, + isTaskChat: true, + mode: "TASK_PLANNER", + taskMeta: { task_id: taskId, role: "planner" }, + }), + ); + dispatch( + addPlannerChat({ + taskId, + planner: { + id: traj.id, + title: traj.title, + createdAt: traj.created_at, + updatedAt: traj.updated_at, + sessionState: traj.session_state, + }, + }), + ); + } + + if (savedPlanners.length > 0 && !activeChat) { + const mostRecent = savedPlanners.reduce((latest, p) => + p.updated_at > latest.updated_at ? p : latest, + ); + dispatch( + setTaskActiveChat({ + taskId, + activeChat: { type: "planner", chatId: mostRecent.id }, + }), + ); + } + }, [dispatch, taskId, savedPlanners, plannerChats, activeChat]); + + useEffect(() => { + if ( + activeChat?.type === "planner" && + !plannerChats.some((p) => p.id === activeChat.chatId) + ) { + if (activePlannerId) { + dispatch( + setTaskActiveChat({ + taskId, + activeChat: { type: "planner", chatId: activePlannerId }, + }), + ); + } else { + dispatch(setTaskActiveChat({ taskId, activeChat: null })); + } + } + }, [activeChat, plannerChats, activePlannerId, dispatch, taskId]); + + useEffect(() => { + if (activeChat?.type === "agent" && board) { + const cardExists = board.cards.some((c) => c.id === activeChat.cardId); + if (!cardExists) { + if (activePlannerId) { + dispatch( + setTaskActiveChat({ + taskId, + activeChat: { type: "planner", chatId: activePlannerId }, + }), + ); + } else { + dispatch(setTaskActiveChat({ taskId, activeChat: null })); + } + } + } + }, [activeChat, board, dispatch, taskId, activePlannerId]); + useEffect(() => { + if (!task) return; + + const prevStatus = prevTaskStatusRef.current; + const currentStatus = task.status; + + prevTaskStatusRef.current = currentStatus; + + if (prevStatus === "planning" && currentStatus === "active") { + setNotification("Planning complete! 
You can now spawn agents."); + setTimeout(() => setNotification(null), 3000); + } + }, [task]); + + // Switch chat when activeChat changes + useEffect(() => { + if (!activeChat) return; + const chatId = activeChat.chatId; + dispatch(switchToThread({ id: chatId, openTab: false })); + }, [dispatch, activeChat]); + + const handleBack = useCallback(() => { + dispatch(pop()); + }, [dispatch]); + + const handleCardClick = useCallback((card: BoardCard) => { + setSelectedCard(card); + }, []); + + const handleNewPlanner = useCallback(() => { + if (isCreatingPlanner) return; + createPlannerChat(taskId) + .unwrap() + .then((result) => { + const newChatId = result.chat_id; + const now = new Date().toISOString(); + dispatch( + createChatWithId({ + id: newChatId, + title: "", + isTaskChat: true, + mode: "TASK_PLANNER", + taskMeta: { task_id: taskId, role: "planner" }, + }), + ); + dispatch( + addPlannerChat({ + taskId, + planner: { + id: newChatId, + title: "", + createdAt: now, + updatedAt: now, + }, + }), + ); + dispatch( + setTaskActiveChat({ + taskId, + activeChat: { type: "planner", chatId: newChatId }, + }), + ); + }) + .catch(() => undefined); + }, [dispatch, taskId, createPlannerChat, isCreatingPlanner]); + + const handleRemovePlanner = useCallback( + (chatId: string) => { + dispatch(removePlannerChat({ taskId, chatId })); + if (activeChat?.type === "planner" && activeChat.chatId === chatId) { + const remaining = plannerChats.filter((p) => p.id !== chatId); + if (remaining.length > 0) { + const mostRecent = remaining.reduce((latest, p) => + p.updatedAt > latest.updatedAt ? p : latest, + ); + dispatch( + setTaskActiveChat({ + taskId, + activeChat: { type: "planner", chatId: mostRecent.id }, + }), + ); + } else { + dispatch(setTaskActiveChat({ taskId, activeChat: null })); + } + } + }, + [dispatch, taskId, activeChat, plannerChats], + ); + + const handleSelectPlanner = useCallback( + (chatId: string) => { + dispatch( + setTaskActiveChat({ taskId, activeChat: { type: "planner", chatId } }), + ); + }, + [dispatch, taskId], + ); + + const handleSelectAgent = useCallback( + (cardId: string, chatId: string) => { + const card = board?.cards.find((c) => c.id === cardId); + const cardTitle = card?.title ?? `Card ${cardId}`; + + dispatch( + createChatWithId({ + id: chatId, + title: `Agent: ${cardTitle}`, + isTaskChat: true, + mode: "TASK_AGENT", + taskMeta: { task_id: taskId, role: "agents", card_id: cardId }, + model: task?.default_agent_model, + }), + ); + + dispatch( + setTaskActiveChat({ + taskId, + activeChat: { type: "agent", cardId, chatId }, + }), + ); + }, + [board, taskId, dispatch, task?.default_agent_model], + ); + + const handleInternalLink = useCallback( + (url: string): boolean => { + const parsed = parseRefactLink(url); + if (!parsed) return false; + + if (parsed.type === "chat") { + const chatId = parsed.id; + const card = board?.cards.find((c) => c.agent_chat_id === chatId); + + let cardId = card?.id ?? ""; + if (!cardId && chatId.startsWith("agent-")) { + // Format: agent-{card_id}-{uuid8} + // Parse from end to handle hyphenated card IDs like "T-1" + const withoutPrefix = chatId.slice("agent-".length); + const lastDashIdx = withoutPrefix.lastIndexOf("-"); + if (lastDashIdx > 0) { + cardId = withoutPrefix.slice(0, lastDashIdx); + } + } + + const cardTitle = card?.title ?? 
`Card ${cardId}`; + + dispatch( + createChatWithId({ + id: chatId, + title: `Agent: ${cardTitle}`, + isTaskChat: true, + mode: "TASK_AGENT", + taskMeta: { task_id: taskId, role: "agents", card_id: cardId }, + model: task?.default_agent_model, + }), + ); + + dispatch( + setTaskActiveChat({ + taskId, + activeChat: { type: "agent", cardId, chatId }, + }), + ); + return true; + } + + return false; + }, + [board, taskId, dispatch, task?.default_agent_model], + ); + + const handleToggleChatExpanded = useCallback(() => { + setChatExpanded((prev) => !prev); + }, []); + + const handleModelChange = useCallback( + (model: string) => { + void updateTaskMeta({ taskId, defaultAgentModel: model }); + }, + [taskId, updateTaskMeta], + ); + + if (taskLoading || boardLoading || !task || !board) { + return <ChatLoading />; + } + + const chatLabel = !activeChat + ? "No chat selected" + : activeChat.type === "planner" + ? `Planner` + : `Agent: ${ + board.cards.find((c) => c.id === activeChat.cardId)?.title ?? "" + }`; + + const branchDisplay = + activeChat?.type === "agent" + ? board.cards.find((c) => c.id === activeChat.cardId)?.agent_branch ?? + task.base_branch ?? + "(unknown)" + : task.base_branch ?? "(unknown)"; + + return ( + <Box + className={`${styles.taskWorkspace} ${ + chatExpanded ? styles.expanded : "" + }`} + > + <Flex className={styles.taskHeader} justify="between" align="center"> + <Flex align="center" gap="3"> + <Button variant="ghost" size="1" onClick={handleBack}> + <ArrowLeftIcon /> + </Button> + <Heading size="4">{task.name}</Heading> + <Badge + color={ + task.status === "active" + ? "blue" + : task.status === "completed" + ? "green" + : "gray" + } + > + {task.status} + </Badge> + <Badge color="gray">🌿 {branchDisplay}</Badge> + </Flex> + <Text size="1" color="gray"> + {task.cards_done}/{task.cards_total} done + {task.cards_failed > 0 && ` • ${task.cards_failed} failed`} + </Text> + </Flex> + + {!chatExpanded && ( + <> + <Box className={styles.boardSection}> + <KanbanBoard board={board} onCardClick={handleCardClick} /> + </Box> + + <Flex className={styles.panelsSection}> + <PlannerPanel + plannerChats={plannerChats} + activeChat={activeChat} + activePlannerId={activePlannerId} + onNewPlanner={handleNewPlanner} + onSelectPlanner={handleSelectPlanner} + onRemovePlanner={handleRemovePlanner} + /> + <AgentsPanel + cards={board.cards} + activeChat={activeChat} + onSelectAgent={handleSelectAgent} + defaultAgentModel={task.default_agent_model} + onModelChange={handleModelChange} + /> + </Flex> + </> + )} + + <Box className={styles.chatSection}> + <Flex + className={styles.chatHeader} + align="center" + gap="2" + px="3" + py="2" + onClick={handleToggleChatExpanded} + style={{ cursor: "pointer" }} + > + <ChevronDownIcon + className={`${styles.chevron} ${ + chatExpanded ? styles.chevronExpanded : "" + }`} + /> + <PersonIcon /> + <Text size="2" weight="medium"> + {chatLabel} + </Text> + </Flex> + <Box className={styles.chatContent}> + {activeChat ? 
( + <InternalLinkProvider onInternalLink={handleInternalLink}> + <Chat + host={config.host} + tabbed={false} + backFromChat={handleBack} + /> + </InternalLinkProvider> + ) : ( + <Flex align="center" justify="center" style={{ height: "100%" }}> + <Text color="gray">Create a planner chat to get started</Text> + </Flex> + )} + </Box> + </Box> + + {selectedCard && ( + <CardDetail card={selectedCard} onClose={() => setSelectedCard(null)} /> + )} + + {notification && ( + <Box + style={{ + position: "fixed", + bottom: "var(--space-4)", + left: "50%", + transform: "translateX(-50%)", + background: "var(--accent-9)", + color: "white", + padding: "var(--space-3) var(--space-4)", + borderRadius: "var(--radius-3)", + zIndex: 50, + boxShadow: "0 4px 12px rgba(0, 0, 0, 0.15)", + }} + > + <Text size="2">{notification}</Text> + </Box> + )} + </Box> + ); +}; diff --git a/refact-agent/gui/src/features/Tasks/Tasks.module.css b/refact-agent/gui/src/features/Tasks/Tasks.module.css new file mode 100644 index 000000000..96350b96d --- /dev/null +++ b/refact-agent/gui/src/features/Tasks/Tasks.module.css @@ -0,0 +1,235 @@ +.kanbanBoard { + composes: scrollbarThin from "../../components/shared/scrollbar.module.css"; + display: flex; + gap: var(--space-3); + justify-content: center; + align-items: flex-start; + padding: var(--space-2); + overflow-x: auto; +} + +.kanbanColumn { + flex: 0 1 auto; + min-width: 140px; + max-width: 220px; + width: fit-content; + background: var(--gray-2); + border-radius: var(--radius-3); + border-top: 3px solid var(--gray-5); + display: flex; + flex-direction: column; +} + +.kanbanColumnContent { + padding: var(--space-1); +} + +.kanbanCard { + background: var(--color-background); + transition: transform 0.1s ease; + padding: var(--space-2); +} + +.kanbanCard:hover { + transform: translateY(-1px); +} + +.kanbanColumnHeader { + padding: var(--space-1) var(--space-2); + border-bottom: 1px solid var(--gray-4); +} + +.taskWorkspace { + display: flex; + flex-direction: column; + height: 100%; + overflow: hidden; +} + +.taskWorkspace.expanded .boardSection, +.taskWorkspace.expanded .panelsSection { + display: none; +} + +.taskHeader { + padding: var(--space-3) var(--space-4); + border-bottom: 1px solid var(--gray-5); + flex-shrink: 0; +} + +.boardSection { + composes: scrollbarThin from "../../components/shared/scrollbar.module.css"; + flex: 0 0 auto; + max-height: 30%; + min-height: 80px; + overflow: auto; + border-bottom: 1px solid var(--gray-5); +} + +.panelsSection { + display: flex; + gap: var(--space-3); + padding: var(--space-3); + border-bottom: 1px solid var(--gray-5); + min-height: 180px; + max-height: 280px; + overflow: hidden; +} + +.panel { + flex: 1; + background: var(--gray-2); + border-radius: var(--radius-3); + padding: var(--space-2); + overflow: hidden; + display: flex; + flex-direction: column; +} + +.panelHeader { + display: flex; + justify-content: space-between; + align-items: center; + padding-bottom: var(--space-2); + border-bottom: 1px solid var(--gray-4); + margin-bottom: var(--space-2); +} + +.panelContent { + flex: 1; + overflow: hidden; + display: flex; + flex-direction: column; +} + +.chatSection { + flex: 1; + min-height: 0; + display: flex; + flex-direction: column; + overflow: hidden; +} + +.chatHeader { + flex-shrink: 0; + border-bottom: 1px solid var(--gray-4); + background: var(--gray-2); + user-select: none; + transition: background-color 0.2s ease; + cursor: pointer; +} + +.chatHeader:hover { + background: var(--gray-3); +} + +.chevron { + transition: 
transform 0.3s ease; + display: flex; + align-items: center; + justify-content: center; +} + +.chevron.chevronExpanded { + transform: rotate(180deg); +} + +.chatContent { + flex: 1; + min-height: 0; + display: flex; + flex-direction: column; + overflow: hidden; +} + +.panelItem { + padding: var(--space-1) var(--space-2); + border-radius: var(--radius-2); + cursor: pointer; + display: flex; + align-items: center; + gap: var(--space-2); +} + +.panelItem:hover { + background: var(--gray-4); +} + +.cardDetailOverlay { + position: fixed; + inset: 0; + background: rgba(0, 0, 0, 0.5); + display: flex; + align-items: center; + justify-content: center; + z-index: 100; +} + +.cardDetail { + composes: scrollbarThin from "../../components/shared/scrollbar.module.css"; + background: var(--color-background); + border-radius: var(--radius-4); + padding: var(--space-4); + max-width: 600px; + max-height: 80vh; + overflow: auto; + width: 90%; +} + +@keyframes pulseBlue { + 0% { + opacity: 1; + transform: scale(1); + } + 50% { + opacity: 0.5; + transform: scale(1.2); + } + 100% { + opacity: 1; + transform: scale(1); + } +} + +@keyframes pulseGreen { + 0% { + opacity: 1; + transform: scale(1); + } + 50% { + opacity: 0.7; + transform: scale(1.1); + } + 100% { + opacity: 1; + transform: scale(1); + } +} + +.agentDotSmall { + width: 8px; + height: 8px; + border-radius: 50%; + flex-shrink: 0; +} + +.agentDotMedium { + width: 12px; + height: 12px; + border-radius: 50%; + flex-shrink: 0; +} + +.agentDotDoing { + background-color: var(--blue-9); + animation: pulseBlue 1.5s ease-in-out infinite; +} + +.agentDotDone { + background-color: var(--green-9); + animation: pulseGreen 2s ease-in-out infinite; +} + +.agentDotFailed { + background-color: var(--red-9); +} diff --git a/refact-agent/gui/src/features/Tasks/index.ts b/refact-agent/gui/src/features/Tasks/index.ts new file mode 100644 index 000000000..0b7aefc3c --- /dev/null +++ b/refact-agent/gui/src/features/Tasks/index.ts @@ -0,0 +1,16 @@ +export { TaskList } from "./TaskList"; +export { TaskWorkspace } from "./TaskWorkspace"; +export { KanbanBoard } from "./KanbanBoard"; +export { + tasksSlice, + openTask, + closeTask, + updateTaskName, + addPlannerChat, + removePlannerChat, + setTaskActiveChat, + selectOpenTasks, + selectOpenTasksFromRoot, + selectTaskActiveChat, +} from "./tasksSlice"; +export type { OpenTask, TasksUIState } from "./tasksSlice"; diff --git a/refact-agent/gui/src/features/Tasks/tasksSlice.ts b/refact-agent/gui/src/features/Tasks/tasksSlice.ts new file mode 100644 index 000000000..1ed89f4b5 --- /dev/null +++ b/refact-agent/gui/src/features/Tasks/tasksSlice.ts @@ -0,0 +1,147 @@ +import { createSlice, PayloadAction } from "@reduxjs/toolkit"; +import { RootState } from "../../app/store"; + +type ActiveChat = + | { type: "planner"; chatId: string } + | { type: "agent"; cardId: string; chatId: string } + | null; + +export interface PlannerInfo { + id: string; + title: string; + createdAt: string; + updatedAt: string; + sessionState?: string; +} + +export interface OpenTask { + id: string; + name: string; + plannerChats: PlannerInfo[]; + activeChat: ActiveChat; +} + +export interface TasksUIState { + openTasks: OpenTask[]; +} + +const initialState: TasksUIState = { + openTasks: [], +}; + +export const tasksSlice = createSlice({ + name: "tasksUI", + initialState, + reducers: { + openTask: (state, action: PayloadAction<{ id: string; name: string }>) => { + const rawName = action.payload.name; + const sanitizedName = + (rawName && typeof rawName === 
"string" ? rawName.trim() : "") || + "Task"; + const existing = state.openTasks.find((t) => t.id === action.payload.id); + if (existing) { + // Update name if changed and new name is meaningful + if (sanitizedName !== "Task" && sanitizedName !== existing.name) { + existing.name = sanitizedName; + } + } else { + state.openTasks.push({ + id: action.payload.id, + name: sanitizedName, + plannerChats: [], + activeChat: null, + }); + } + }, + closeTask: (state, action: PayloadAction<string>) => { + state.openTasks = state.openTasks.filter((t) => t.id !== action.payload); + }, + updateTaskName: ( + state, + action: PayloadAction<{ id: string; name: string }>, + ) => { + const task = state.openTasks.find((t) => t.id === action.payload.id); + if (task) { + task.name = action.payload.name; + } + }, + addPlannerChat: ( + state, + action: PayloadAction<{ taskId: string; planner: PlannerInfo }>, + ) => { + const task = state.openTasks.find((t) => t.id === action.payload.taskId); + if ( + task && + !task.plannerChats.some((p) => p.id === action.payload.planner.id) + ) { + task.plannerChats.push(action.payload.planner); + } + }, + updatePlannerChat: ( + state, + action: PayloadAction<{ + taskId: string; + planner: Partial<PlannerInfo> & { id: string }; + }>, + ) => { + const task = state.openTasks.find((t) => t.id === action.payload.taskId); + if (task) { + const idx = task.plannerChats.findIndex( + (p) => p.id === action.payload.planner.id, + ); + if (idx !== -1) { + task.plannerChats[idx] = { + ...task.plannerChats[idx], + ...action.payload.planner, + }; + } + } + }, + removePlannerChat: ( + state, + action: PayloadAction<{ taskId: string; chatId: string }>, + ) => { + const task = state.openTasks.find((t) => t.id === action.payload.taskId); + if (task) { + task.plannerChats = task.plannerChats.filter( + (p) => p.id !== action.payload.chatId, + ); + } + }, + setTaskActiveChat: ( + state, + action: PayloadAction<{ taskId: string; activeChat: ActiveChat }>, + ) => { + const task = state.openTasks.find((t) => t.id === action.payload.taskId); + if (task) { + task.activeChat = action.payload.activeChat; + } + }, + }, + selectors: { + selectOpenTasks: (state) => state.openTasks, + }, +}); + +export const { + openTask, + closeTask, + updateTaskName, + addPlannerChat, + updatePlannerChat, + removePlannerChat, + setTaskActiveChat, +} = tasksSlice.actions; +export const { selectOpenTasks } = tasksSlice.selectors; + +// Selector that works with RootState +export const selectOpenTasksFromRoot = (state: RootState) => + state.tasksUI.openTasks; + +export const selectTaskActiveChat = ( + state: RootState, + taskId: string, +): ActiveChat => { + const task = state.tasksUI.openTasks.find((t) => t.id === taskId); + return task?.activeChat ?? 
null; +}; diff --git a/refact-agent/gui/src/features/ThreadHistory/ThreadHistory.tsx b/refact-agent/gui/src/features/ThreadHistory/ThreadHistory.tsx index cb693982d..c34824e78 100644 --- a/refact-agent/gui/src/features/ThreadHistory/ThreadHistory.tsx +++ b/refact-agent/gui/src/features/ThreadHistory/ThreadHistory.tsx @@ -1,10 +1,14 @@ -import { FC, useCallback } from "react"; +import { FC, useCallback, useMemo } from "react"; import { Config } from "../Config/configSlice"; -import { Button, Flex } from "@radix-ui/themes"; +import { Button, Flex, Spinner, Text } from "@radix-ui/themes"; import { ArrowLeftIcon } from "@radix-ui/react-icons"; import { ChatRawJSON } from "../../components/ChatRawJSON"; import { useAppDispatch, useAppSelector } from "../../hooks"; -import { getChatById } from "../History/historySlice"; +import { selectThreadById } from "../Chat/Thread/selectors"; +import { + useGetTrajectoryQuery, + trajectoryDataToChatThread, +} from "../../services/refact"; import { copyChatHistoryToClipboard } from "../../utils/copyChatHistoryToClipboard"; import { clearError, getErrorMessage, setError } from "../Errors/errorsSlice"; import { @@ -35,14 +39,34 @@ export const ThreadHistory: FC<ThreadHistoryProps> = ({ }) => { const dispatch = useAppDispatch(); - const historyThread = useAppSelector((state) => getChatById(state, chatId), { - devModeChecks: { stabilityCheck: "never" }, + const activeThread = useAppSelector((state) => + selectThreadById(state, chatId), + ); + + const { + data: trajectoryData, + isLoading, + error: fetchError, + } = useGetTrajectoryQuery(chatId, { + skip: Boolean(activeThread && activeThread.messages.length > 0), }); - const historyThreadToPass = historyThread && { - ...historyThread, - model: historyThread.model || "gpt-4o-mini", - }; + const historyThreadToPass = useMemo(() => { + if (activeThread && activeThread.messages.length > 0) { + return { + ...activeThread, + model: activeThread.model || "gpt-4o-mini", + }; + } + if (trajectoryData) { + const thread = trajectoryDataToChatThread(trajectoryData); + return { + ...thread, + model: thread.model || "gpt-4o-mini", + }; + } + return null; + }, [activeThread, trajectoryData]); const error = useAppSelector(getErrorMessage); const information = useAppSelector(getInformationMessage); @@ -54,15 +78,15 @@ export const ThreadHistory: FC<ThreadHistoryProps> = ({ ); const handleCopyToClipboardJSON = useCallback(() => { - if (!historyThread) { + if (!historyThreadToPass) { dispatch(setError("No history thread found")); return; } - void copyChatHistoryToClipboard(historyThread).then(() => { + void copyChatHistoryToClipboard(historyThreadToPass).then(() => { dispatch(setInformation("Chat history copied to clipboard")); }); - }, [dispatch, historyThread]); + }, [dispatch, historyThreadToPass]); const handleBackFromThreadHistory = useCallback( (customBackFunction: () => void) => { @@ -99,6 +123,19 @@ export const ThreadHistory: FC<ThreadHistoryProps> = ({ Back </Button> )} + {isLoading && ( + <Flex align="center" justify="center" py="6" gap="2"> + <Spinner size="2" /> + <Text size="2" color="gray"> + Loading thread history... 
+ </Text> + </Flex> + )} + {fetchError && !historyThreadToPass && ( + <Text size="2" color="red"> + Failed to load thread history + </Text> + )} {historyThreadToPass && ( <ChatRawJSON thread={historyThreadToPass} diff --git a/refact-agent/gui/src/features/ToolConfirmation/confirmationSlice.ts b/refact-agent/gui/src/features/ToolConfirmation/confirmationSlice.ts deleted file mode 100644 index 129c62585..000000000 --- a/refact-agent/gui/src/features/ToolConfirmation/confirmationSlice.ts +++ /dev/null @@ -1,85 +0,0 @@ -import { createSlice, PayloadAction } from "@reduxjs/toolkit"; -import type { ToolConfirmationPauseReason } from "../../services/refact"; -import { ideToolCallResponse } from "../../hooks/useEventBusForIDE"; - -export type ConfirmationState = { - pauseReasons: ToolConfirmationPauseReason[]; - pause: boolean; - status: { - wasInteracted: boolean; - confirmationStatus: boolean; - }; -}; - -const initialState: ConfirmationState = { - pauseReasons: [], - pause: false, - status: { - wasInteracted: false, - confirmationStatus: true, - }, -}; - -type ConfirmationActionPayload = { - wasInteracted: boolean; - confirmationStatus: boolean; -}; - -export const confirmationSlice = createSlice({ - name: "confirmation", - initialState, - reducers: { - setPauseReasons( - state, - action: PayloadAction<ToolConfirmationPauseReason[]>, - ) { - state.pause = true; - state.pauseReasons = action.payload; - }, - resetConfirmationInteractedState(state) { - state.status.wasInteracted = false; - state.pause = false; - state.pauseReasons = []; - }, - clearPauseReasonsAndHandleToolsStatus( - state, - action: PayloadAction<ConfirmationActionPayload>, - ) { - state.pause = false; - state.pauseReasons = []; - state.status = action.payload; - }, - - updateConfirmationAfterIdeToolUse( - state, - action: PayloadAction<Parameters<typeof ideToolCallResponse>[0]>, - ) { - const pauseReasons = state.pauseReasons.filter( - (reason) => reason.tool_call_id !== action.payload.toolCallId, - ); - if (pauseReasons.length === 0) { - state.status.wasInteracted = true; // work around for auto send. 
- } - state.pauseReasons = pauseReasons; - }, - }, - selectors: { - getPauseReasonsWithPauseStatus: (state) => state, - getToolsInteractionStatus: (state) => state.status.wasInteracted, - getToolsConfirmationStatus: (state) => state.status.confirmationStatus, - getConfirmationPauseStatus: (state) => state.pause, - }, -}); - -export const { - setPauseReasons, - resetConfirmationInteractedState, - clearPauseReasonsAndHandleToolsStatus, - updateConfirmationAfterIdeToolUse, -} = confirmationSlice.actions; -export const { - getPauseReasonsWithPauseStatus, - getToolsConfirmationStatus, - getToolsInteractionStatus, - getConfirmationPauseStatus, -} = confirmationSlice.selectors; diff --git a/refact-agent/gui/src/features/Tour.tsx b/refact-agent/gui/src/features/Tour.tsx deleted file mode 100644 index 65ef510a8..000000000 --- a/refact-agent/gui/src/features/Tour.tsx +++ /dev/null @@ -1,136 +0,0 @@ -import { createAction, createReducer } from "@reduxjs/toolkit"; -import { createContext, useContext, useState } from "react"; - -type TourInProgress = { - type: "in_progress"; - step: number; -}; - -type TourClosed = { - type: "closed"; - step: number; -}; - -type TourFinished = { - type: "finished"; -}; - -export type TourState = TourInProgress | TourClosed | TourFinished; - -const initialState: TourState = { - type: "in_progress", - step: 1, -}; - -// eslint-disable-next-line react-refresh/only-export-components -export const next = createAction("tour/next"); -// eslint-disable-next-line react-refresh/only-export-components -export const close = createAction("tour/close"); -// eslint-disable-next-line react-refresh/only-export-components -export const finish = createAction("tour/finish"); -// eslint-disable-next-line react-refresh/only-export-components -export const restart = createAction("tour/restart"); - -// eslint-disable-next-line react-refresh/only-export-components -export const tourReducer = createReducer<TourState>(initialState, (builder) => { - builder.addCase(next, (state) => { - if (state.type === "in_progress") { - return { - ...state, - step: state.step + 1, - }; - } - return state; - }); - builder.addCase(close, (state) => { - if (state.type === "in_progress") { - return { - ...state, - type: "closed", - }; - } - return state; - }); - builder.addCase(finish, () => { - return { type: "finished" }; - }); - builder.addCase(restart, () => { - return { type: "in_progress", step: 1 }; - }); -}); - -export type TourRefs = { - newChat: null | HTMLButtonElement; - useTools: null | HTMLDivElement; - useModel: null | HTMLDivElement; - chat: null | HTMLDivElement; - openInNewTab: null | HTMLButtonElement; - back: null | HTMLAnchorElement; - f1: null | HTMLButtonElement; - more: null | HTMLButtonElement; - setupIntegrations: null | HTMLButtonElement; - setNewChat: (x: HTMLButtonElement | null) => void; - setUseTools: (x: HTMLDivElement | null) => void; - setUseModel: (x: HTMLDivElement | null) => void; - setChat: (x: HTMLDivElement | null) => void; - setOpenInNewTab: (x: HTMLButtonElement | null) => void; - setBack: (x: HTMLAnchorElement | null) => void; - setF1: (x: HTMLButtonElement | null) => void; - setMore: (x: HTMLButtonElement | null) => void; - setSetupIntegrations: (x: HTMLButtonElement | null) => void; -}; - -// eslint-disable-next-line @typescript-eslint/no-non-null-assertion -const TourContext = createContext<TourRefs>(null!); - -type TourContextProps = { - children: React.ReactNode; -}; -// TODO: having a component here causes the linter warnings, Tour a directory, with separate files 
should for the component and actions fix this -export const TourProvider = ({ children }: TourContextProps) => { - const [newChat, setNewChat] = useState<null | HTMLButtonElement>(null); - const [useTools, setUseTools] = useState<null | HTMLDivElement>(null); - const [useModel, setUseModel] = useState<null | HTMLDivElement>(null); - const [chat, setChat] = useState<null | HTMLDivElement>(null); - const [openInNewTab, setOpenInNewTab] = useState<null | HTMLButtonElement>( - null, - ); - const [back, setBack] = useState<null | HTMLAnchorElement>(null); - const [f1, setF1] = useState<null | HTMLButtonElement>(null); - const [more, setMore] = useState<null | HTMLButtonElement>(null); - const [setupIntegrations, setSetupIntegrations] = - useState<null | HTMLButtonElement>(null); - - return ( - <TourContext.Provider - value={{ - newChat, - useTools, - useModel, - chat, - openInNewTab, - back, - f1, - more, - setupIntegrations, - setNewChat, - setUseTools, - setUseModel, - setChat, - setOpenInNewTab, - setBack, - setF1, - setMore, - setSetupIntegrations, - }} - > - {children} - </TourContext.Provider> - ); -}; - -// eslint-disable-next-line react-refresh/only-export-components -export const useTourRefs = () => { - const context = useContext(TourContext); - return context; -}; diff --git a/refact-agent/gui/src/hooks/index.ts b/refact-agent/gui/src/hooks/index.ts index 22d2c9e15..bb29d8349 100644 --- a/refact-agent/gui/src/hooks/index.ts +++ b/refact-agent/gui/src/hooks/index.ts @@ -20,7 +20,7 @@ export * from "./useAppearance"; export * from "./useConfig"; export * from "./useAppDispatch"; export * from "./useAppSelector"; -export * from "./useSendChatRequest"; +export * from "./useChatActions"; export * from "./useGetUserSurvey"; export * from "./useLinksFromLsp"; export * from "./useGoToLink"; @@ -31,10 +31,19 @@ export * from "./useCapsForToolUse"; export * from "./useCanUseTools"; export * from "./useCopyToClipboard"; export * from "./useResizeObserver"; -export * from "./useCompressChat"; export * from "./useAutoFocusOnce"; export * from "./useHideScroll"; -export * from "./useCompressionStop"; export * from "./useEventBusForApp"; export * from "./useTotalCostForChat"; export * from "./useCheckpoints"; +export * from "./useSidebarSubscription"; +export * from "./useChatSubscription"; +export * from "./useAllChatsSubscription"; +export * from "./useDraftMessage"; + +export * from "./useProvidersQuery"; + +export * from "./useBrowserOnlineStatus"; +export * from "./useLoadMoreHistory"; +export * from "./useThinking"; +export * from "./useShiki"; diff --git a/refact-agent/gui/src/hooks/useAllChatsSubscription.ts b/refact-agent/gui/src/hooks/useAllChatsSubscription.ts new file mode 100644 index 000000000..17a33ebb4 --- /dev/null +++ b/refact-agent/gui/src/hooks/useAllChatsSubscription.ts @@ -0,0 +1,437 @@ +import { useEffect, useRef, useCallback } from "react"; +import { useAppDispatch } from "./useAppDispatch"; +import { useAppSelector } from "./useAppSelector"; +import { + applyChatEvent, + clearSseRefreshRequest, +} from "../features/Chat/Thread/actions"; +import { + selectCurrentThreadId, + selectOpenThreadIds, + selectSseRefreshRequested, +} from "../features/Chat/Thread/selectors"; +import { selectLspPort, selectApiKey } from "../features/Config/configSlice"; +import { subscribeToChatEvents } from "../services/refact/chatSubscription"; +import { + setSseStatus, + sseEventReceived, + removeSseConnection, + clearAllSseConnections, +} from "../features/Connection"; +import { calculateBackoff } 
from "../utils/backoff"; +import type { ChatEventEnvelope } from "../services/refact/chatSubscription"; + +export function useAllChatsSubscription() { + const dispatch = useAppDispatch(); + const port = useAppSelector(selectLspPort); + const apiKey = useAppSelector(selectApiKey); + const currentThreadId = useAppSelector(selectCurrentThreadId); + const openThreadIds = useAppSelector(selectOpenThreadIds); + const sseRefreshRequested = useAppSelector(selectSseRefreshRequested); + + const subscriptionsRef = useRef<Map<string, () => void>>(new Map()); + const seqMapRef = useRef<Map<string, bigint>>(new Map()); + const manualCloseRef = useRef<Set<string>>(new Set()); + const desiredIdsRef = useRef<Set<string>>(new Set()); + const retryCountRef = useRef<Map<string, number>>(new Map()); + const timeoutRef = useRef<Map<string, ReturnType<typeof setTimeout>>>( + new Map(), + ); + const lastActivityDispatchRef = useRef<Map<string, number>>(new Map()); + const lastActivityAtRef = useRef<Map<string, number>>(new Map()); + const streamDeltaFlushRef = useRef< + Map<string, ReturnType<typeof setTimeout>> + >(new Map()); + const pendingStreamDeltaRef = useRef< + Map<string, Extract<ChatEventEnvelope, { type: "stream_delta" }>> + >(new Map()); + const streamedBytesRef = useRef<Map<string, number>>(new Map()); + const portRef = useRef(port); + const apiKeyRef = useRef(apiKey); + const subscribeRef = useRef<((chatId: string) => void) | null>(null); + const enqueueStreamDeltaRef = useRef< + | (( + chatId: string, + envelope: Extract<ChatEventEnvelope, { type: "stream_delta" }>, + ) => void) + | null + >(null); + const flushPendingStreamDeltaForChatRef = useRef< + ((chatId: string) => void) | null + >(null); + const clearStreamDeltaFlushForChatRef = useRef< + ((chatId: string) => void) | null + >(null); + + const STALE_THRESHOLD_MS = 45_000; + + const ACTIVITY_THROTTLE_MS = 500; + const MAX_MERGED_DELTA_OPS = 256; + + // Adaptive flush thresholds (bytes of accumulated content) + const FLUSH_TIER_FAST_BYTES = 8_192; + const FLUSH_TIER_MEDIUM_BYTES = 200_000; + // Flush intervals per tier (ms) + const FLUSH_MS_FAST = 0; // RAF (~16ms) + const FLUSH_MS_MEDIUM = 150; + const FLUSH_MS_SLOW = 500; + const FLUSH_MS_BACKGROUND = 500; + // Hard cap: force flush if buffered text exceeds this + const MAX_BUFFERED_BYTES = 2_000_000; + + const activeChatId = currentThreadId; + + const clearPendingTimeout = useCallback((chatId: string) => { + const existingTimeout = timeoutRef.current.get(chatId); + if (existingTimeout) { + clearTimeout(existingTimeout); + timeoutRef.current.delete(chatId); + } + }, []); + + const clearStreamDeltaFlushForChat = useCallback((chatId: string) => { + const timerId = streamDeltaFlushRef.current.get(chatId); + if (timerId != null) { + clearTimeout(timerId); + streamDeltaFlushRef.current.delete(chatId); + } + }, []); + + const flushPendingStreamDeltaForChat = useCallback( + (chatId: string) => { + const pending = pendingStreamDeltaRef.current.get(chatId); + if (!pending) return; + pendingStreamDeltaRef.current.delete(chatId); + dispatch(applyChatEvent(pending)); + }, + [dispatch], + ); + + const getFlushDelayMs = useCallback( + (chatId: string): number => { + const isActive = chatId === activeChatId; + if (!isActive) return FLUSH_MS_BACKGROUND; + const bytes = streamedBytesRef.current.get(chatId) ?? 
0; + if (bytes < FLUSH_TIER_FAST_BYTES) return FLUSH_MS_FAST; + if (bytes < FLUSH_TIER_MEDIUM_BYTES) return FLUSH_MS_MEDIUM; + return FLUSH_MS_SLOW; + }, + [activeChatId], + ); + + const scheduleStreamDeltaFlushForChat = useCallback( + (chatId: string) => { + if (streamDeltaFlushRef.current.has(chatId)) return; + + const delayMs = getFlushDelayMs(chatId); + + const flush = () => { + streamDeltaFlushRef.current.delete(chatId); + flushPendingStreamDeltaForChat(chatId); + }; + + const id = setTimeout(flush, Math.max(delayMs, 0)); + streamDeltaFlushRef.current.set(chatId, id); + }, + [flushPendingStreamDeltaForChat, getFlushDelayMs], + ); + + const enqueueStreamDelta = useCallback( + ( + chatId: string, + envelope: Extract<ChatEventEnvelope, { type: "stream_delta" }>, + ) => { + // Track accumulated content bytes for adaptive throttle + for (const op of envelope.ops) { + if (op.op === "append_content" || op.op === "append_reasoning") { + const prev = streamedBytesRef.current.get(chatId) ?? 0; + streamedBytesRef.current.set(chatId, prev + op.text.length); + } + } + + const pending = pendingStreamDeltaRef.current.get(chatId); + if (pending && pending.message_id === envelope.message_id) { + const mergedOpsLen = pending.ops.length + envelope.ops.length; + if (mergedOpsLen <= MAX_MERGED_DELTA_OPS) { + pending.seq = envelope.seq; + pending.ops.push(...envelope.ops); + } else { + flushPendingStreamDeltaForChat(chatId); + pendingStreamDeltaRef.current.set(chatId, envelope); + } + } else { + flushPendingStreamDeltaForChat(chatId); + pendingStreamDeltaRef.current.set(chatId, envelope); + } + + // Force immediate flush if buffered text is too large + const bufferedBytes = streamedBytesRef.current.get(chatId) ?? 0; + if (bufferedBytes > MAX_BUFFERED_BYTES) { + clearStreamDeltaFlushForChat(chatId); + flushPendingStreamDeltaForChat(chatId); + return; + } + + scheduleStreamDeltaFlushForChat(chatId); + }, + [ + flushPendingStreamDeltaForChat, + scheduleStreamDeltaFlushForChat, + clearStreamDeltaFlushForChat, + ], + ); + + enqueueStreamDeltaRef.current = enqueueStreamDelta; + flushPendingStreamDeltaForChatRef.current = flushPendingStreamDeltaForChat; + clearStreamDeltaFlushForChatRef.current = clearStreamDeltaFlushForChat; + + const scheduleResubscribe = useCallback( + (chatId: string, useBackoff = false) => { + clearPendingTimeout(chatId); + + const retryCount = retryCountRef.current.get(chatId) ?? 0; + const delay = useBackoff ? calculateBackoff(retryCount) : 100; + + const timeoutId = setTimeout(() => { + timeoutRef.current.delete(chatId); + if (!desiredIdsRef.current.has(chatId)) return; + if (subscriptionsRef.current.has(chatId)) return; + subscribeRef.current?.(chatId); + }, delay); + + timeoutRef.current.set(chatId, timeoutId); + }, + [clearPendingTimeout], + ); + + const subscribeToChat = useCallback( + (chatId: string) => { + if (subscriptionsRef.current.has(chatId)) return; + if (!portRef.current) return; + if (!desiredIdsRef.current.has(chatId)) return; + + manualCloseRef.current.delete(chatId); + seqMapRef.current.set(chatId, 0n); + + dispatch(setSseStatus({ chatId, status: "connecting" })); + + const unsubscribe = subscribeToChatEvents( + chatId, + portRef.current, + { + onEvent: (envelope) => { + const seq = BigInt(envelope.seq); + const lastSeq = seqMapRef.current.get(chatId) ?? 
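+              // Snapshot events reset the per-chat sequence; for incremental
+              // events, duplicates (seq <= lastSeq) are dropped and a gap
+              // (seq > lastSeq + 1) forces a quick resubscribe to recover.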
0n; + + if (envelope.type === "snapshot") { + flushPendingStreamDeltaForChatRef.current?.(chatId); + streamedBytesRef.current.delete(chatId); + seqMapRef.current.set(chatId, seq); + retryCountRef.current.set(chatId, 0); + dispatch(setSseStatus({ chatId, status: "connected" })); + } else { + if (seq <= lastSeq) return; + if (seq > lastSeq + 1n) { + flushPendingStreamDeltaForChatRef.current?.(chatId); + const unsub = subscriptionsRef.current.get(chatId); + if (unsub) { + manualCloseRef.current.add(chatId); + unsub(); + subscriptionsRef.current.delete(chatId); + } + dispatch(setSseStatus({ chatId, status: "connecting" })); + scheduleResubscribe(chatId, false); + return; + } + seqMapRef.current.set(chatId, seq); + } + if (envelope.type === "stream_delta") { + enqueueStreamDeltaRef.current?.(chatId, envelope); + } else { + flushPendingStreamDeltaForChatRef.current?.(chatId); + if (envelope.type === "stream_finished") { + streamedBytesRef.current.delete(chatId); + } + dispatch(applyChatEvent(envelope)); + } + }, + onConnected: () => { + dispatch(setSseStatus({ chatId, status: "connected" })); + }, + onError: (error) => { + clearStreamDeltaFlushForChatRef.current?.(chatId); + flushPendingStreamDeltaForChatRef.current?.(chatId); + subscriptionsRef.current.delete(chatId); + const count = (retryCountRef.current.get(chatId) ?? 0) + 1; + retryCountRef.current.set(chatId, count); + dispatch( + setSseStatus({ + chatId, + status: "disconnected", + error: error.message, + }), + ); + if (!manualCloseRef.current.has(chatId)) { + scheduleResubscribe(chatId, true); + } + }, + onDisconnected: () => { + clearStreamDeltaFlushForChatRef.current?.(chatId); + flushPendingStreamDeltaForChatRef.current?.(chatId); + subscriptionsRef.current.delete(chatId); + const count = (retryCountRef.current.get(chatId) ?? 0) + 1; + retryCountRef.current.set(chatId, count); + dispatch(setSseStatus({ chatId, status: "disconnected" })); + if (!manualCloseRef.current.has(chatId)) { + scheduleResubscribe(chatId, true); + } + }, + onActivity: () => { + const now = Date.now(); + lastActivityAtRef.current.set(chatId, now); + const lastDispatch = + lastActivityDispatchRef.current.get(chatId) ?? 0; + if (now - lastDispatch >= ACTIVITY_THROTTLE_MS) { + lastActivityDispatchRef.current.set(chatId, now); + dispatch(sseEventReceived({ chatId })); + } + }, + }, + apiKeyRef.current ?? 
undefined, + ); + + subscriptionsRef.current.set(chatId, unsubscribe); + }, + [dispatch, scheduleResubscribe], + ); + + subscribeRef.current = subscribeToChat; + const subscribe = subscribeToChat; + + const unsubscribe = useCallback( + (chatId: string) => { + manualCloseRef.current.add(chatId); + clearPendingTimeout(chatId); + clearStreamDeltaFlushForChat(chatId); + pendingStreamDeltaRef.current.delete(chatId); + streamedBytesRef.current.delete(chatId); + const unsub = subscriptionsRef.current.get(chatId); + if (unsub) { + unsub(); + subscriptionsRef.current.delete(chatId); + seqMapRef.current.delete(chatId); + retryCountRef.current.delete(chatId); + lastActivityDispatchRef.current.delete(chatId); + lastActivityAtRef.current.delete(chatId); + dispatch(removeSseConnection({ chatId })); + } + }, + [dispatch, clearPendingTimeout, clearStreamDeltaFlushForChat], + ); + + const unsubscribeAll = useCallback(() => { + for (const chatId of subscriptionsRef.current.keys()) { + manualCloseRef.current.add(chatId); + } + for (const unsub of subscriptionsRef.current.values()) { + unsub(); + } + for (const timeoutId of timeoutRef.current.values()) { + clearTimeout(timeoutId); + } + for (const flushId of streamDeltaFlushRef.current.values()) { + clearTimeout(flushId); + } + subscriptionsRef.current.clear(); + seqMapRef.current.clear(); + manualCloseRef.current.clear(); + desiredIdsRef.current.clear(); + retryCountRef.current.clear(); + timeoutRef.current.clear(); + lastActivityDispatchRef.current.clear(); + lastActivityAtRef.current.clear(); + streamDeltaFlushRef.current.clear(); + pendingStreamDeltaRef.current.clear(); + streamedBytesRef.current.clear(); + dispatch(clearAllSseConnections()); + }, [dispatch]); + + useEffect(() => { + if (port !== portRef.current || apiKey !== apiKeyRef.current) { + unsubscribeAll(); + portRef.current = port; + apiKeyRef.current = apiKey; + } + + if (!port) return; + + const desired = new Set(openThreadIds); + if (activeChatId) desired.add(activeChatId); + desiredIdsRef.current = desired; + const subscribedIds = Array.from(subscriptionsRef.current.keys()); + + for (const id of subscribedIds) { + if (!desiredIdsRef.current.has(id)) { + unsubscribe(id); + } + } + + for (const id of desiredIdsRef.current) { + if (!subscriptionsRef.current.has(id)) { + subscribe(id); + } + } + }, [ + activeChatId, + openThreadIds, + port, + apiKey, + subscribe, + unsubscribe, + unsubscribeAll, + ]); + + useEffect(() => { + if (!sseRefreshRequested) return; + if (!portRef.current) return; + + dispatch(clearSseRefreshRequest()); + unsubscribe(sseRefreshRequested); + setTimeout(() => subscribe(sseRefreshRequested), 50); + }, [sseRefreshRequested, dispatch, subscribe, unsubscribe]); + + useEffect(() => { + return () => { + unsubscribeAll(); + }; + }, [unsubscribeAll]); + + useEffect(() => { + const handleVisibilityChange = () => { + if (document.visibilityState === "visible") { + for (const chatId of desiredIdsRef.current) { + const lastActivity = lastActivityAtRef.current.get(chatId) ?? 
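+            // On returning to a visible tab, connections idle for longer than
+            // STALE_THRESHOLD_MS are re-opened, and any chat that lost its
+            // subscription while hidden is subscribed again with retries reset.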
0; + const isStale = + lastActivity > 0 && Date.now() - lastActivity > STALE_THRESHOLD_MS; + + if (isStale && subscriptionsRef.current.has(chatId)) { + retryCountRef.current.set(chatId, 0); + unsubscribe(chatId); + subscribe(chatId); + continue; + } + + if (!subscriptionsRef.current.has(chatId)) { + retryCountRef.current.set(chatId, 0); + subscribe(chatId); + } + } + } + }; + + document.addEventListener("visibilitychange", handleVisibilityChange); + return () => { + document.removeEventListener("visibilitychange", handleVisibilityChange); + }; + }, [subscribe, unsubscribe]); +} diff --git a/refact-agent/gui/src/hooks/useAttachedImages.ts b/refact-agent/gui/src/hooks/useAttachedImages.ts index fb0062c65..cc88ebf91 100644 --- a/refact-agent/gui/src/hooks/useAttachedImages.ts +++ b/refact-agent/gui/src/hooks/useAttachedImages.ts @@ -2,35 +2,55 @@ import { useCallback, useEffect } from "react"; import { useAppSelector } from "./useAppSelector"; import { useAppDispatch } from "./useAppDispatch"; import { - selectAllImages, - removeImageByIndex, - addImage, + selectThreadImages, + selectThreadTextFiles, + selectChatId, + addThreadImage, + removeThreadImageByIndex, + resetThreadImages, + addThreadTextFile, + removeThreadTextFileByIndex, + resetThreadTextFiles, type ImageFile, - resetAttachedImagesSlice, -} from "../features/AttachedImages"; + type TextFile, +} from "../features/Chat"; import { setError } from "../features/Errors/errorsSlice"; import { setInformation } from "../features/Errors/informationSlice"; import { useCapsForToolUse } from "./useCapsForToolUse"; export function useAttachedImages() { - const images = useAppSelector(selectAllImages); + const images = useAppSelector(selectThreadImages); + const textFiles = useAppSelector(selectThreadTextFiles); + const chatId = useAppSelector(selectChatId); const { isMultimodalitySupportedForCurrentModel } = useCapsForToolUse(); const dispatch = useAppDispatch(); const removeImage = useCallback( (index: number) => { - const action = removeImageByIndex(index); - dispatch(action); + dispatch(removeThreadImageByIndex({ id: chatId, index })); }, - [dispatch], + [dispatch, chatId], ); const insertImage = useCallback( (file: ImageFile) => { - const action = addImage(file); - dispatch(action); + dispatch(addThreadImage({ id: chatId, image: file })); }, - [dispatch], + [dispatch, chatId], + ); + + const removeTextFile = useCallback( + (index: number) => { + dispatch(removeThreadTextFileByIndex({ id: chatId, index })); + }, + [dispatch, chatId], + ); + + const insertTextFile = useCallback( + (file: TextFile) => { + dispatch(addThreadTextFile({ id: chatId, file })); + }, + [dispatch, chatId], ); const handleError = useCallback( @@ -61,20 +81,34 @@ export function useAttachedImages() { [handleError, handleWarning, insertImage], ); + const processAndInsertTextFiles = useCallback( + (files: File[]) => { + void processTextFiles(files, insertTextFile, handleError); + }, + [handleError, insertTextFile], + ); + + const resetAllTextFiles = useCallback(() => { + dispatch(resetThreadTextFiles({ id: chatId })); + }, [dispatch, chatId]); + useEffect(() => { if (!isMultimodalitySupportedForCurrentModel) { - const action = resetAttachedImagesSlice(); - dispatch(action); + dispatch(resetThreadImages({ id: chatId })); } - }, [isMultimodalitySupportedForCurrentModel, dispatch]); + }, [isMultimodalitySupportedForCurrentModel, dispatch, chatId]); return { images, + textFiles, setError: handleError, setWarning: handleWarning, insertImage, removeImage, 
processAndInsertImages, + removeTextFile, + processAndInsertTextFiles, + resetAllTextFiles, }; } @@ -86,7 +120,7 @@ async function processImages( ) { for (const file of files) { if (file.type !== "image/jpeg" && file.type !== "image/png") { - onError(`file ${file.type} is not a supported. Use jpeg or png`); + onError(`file ${file.type} is not supported. Use jpeg or png`); } else { try { const scaledImage = await scaleImage(file, 800); @@ -106,6 +140,33 @@ async function processImages( } } } + +async function processTextFiles( + files: File[], + onSuccess: (file: TextFile) => void, + onError: (reason: string) => void, +) { + for (const file of files) { + try { + const content = await readTextFile(file); + onSuccess({ name: file.name, content }); + } catch (error) { + onError(`file ${file.name} processing has failed`); + } + } +} + +function readTextFile(file: File): Promise<string> { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + reader.onload = () => { + resolve(reader.result as string); + }; + reader.onabort = () => reject("abort"); + reader.onerror = () => reject("error"); + reader.readAsText(file); + }); +} function scaleImage(file: File, maxSize: number): Promise<string> { return new Promise((resolve, reject) => { const reader = new FileReader(); diff --git a/refact-agent/gui/src/hooks/useBrowserOnlineStatus.ts b/refact-agent/gui/src/hooks/useBrowserOnlineStatus.ts new file mode 100644 index 000000000..f14f3918d --- /dev/null +++ b/refact-agent/gui/src/hooks/useBrowserOnlineStatus.ts @@ -0,0 +1,27 @@ +import { useEffect } from "react"; +import { useAppDispatch } from "./useAppDispatch"; +import { setBrowserOnline } from "../features/Connection"; + +export function useBrowserOnlineStatus() { + const dispatch = useAppDispatch(); + + useEffect(() => { + const handleOnline = () => { + dispatch(setBrowserOnline(true)); + }; + + const handleOffline = () => { + dispatch(setBrowserOnline(false)); + }; + + window.addEventListener("online", handleOnline); + window.addEventListener("offline", handleOffline); + + dispatch(setBrowserOnline(navigator.onLine)); + + return () => { + window.removeEventListener("online", handleOnline); + window.removeEventListener("offline", handleOffline); + }; + }, [dispatch]); +} diff --git a/refact-agent/gui/src/hooks/useCapsForToolUse.ts b/refact-agent/gui/src/hooks/useCapsForToolUse.ts index 48e802a50..acb1001e6 100644 --- a/refact-agent/gui/src/hooks/useCapsForToolUse.ts +++ b/refact-agent/gui/src/hooks/useCapsForToolUse.ts @@ -1,16 +1,14 @@ import { useCallback, useEffect, useMemo, useState } from "react"; -import { selectThreadToolUse } from "../features/Chat/Thread/selectors"; +import { selectThreadMode } from "../features/Chat/Thread/selectors"; import { useAppSelector, useGetCapsQuery, useAppDispatch } from "."; +import { useGetChatModesQuery } from "../services/refact/chatModes"; import { getSelectedChatModel, setChatModel, setMaxNewTokens, - setToolUse, - ToolUse, } from "../features/Chat"; -// TODO: hard coded for now. export const PAID_AGENT_LIST = [ "gpt-4o", "claude-3-5-sonnet", @@ -20,21 +18,34 @@ export const PAID_AGENT_LIST = [ "claude-3-7-sonnet", ]; -// TODO: hard coded for now. 
Unlimited usage models export const UNLIMITED_PRO_MODELS_LIST = ["gpt-4o-mini"]; export function useCapsForToolUse() { const [wasAdjusted, setWasAdjusted] = useState(false); const caps = useGetCapsQuery(); - const toolUse = useAppSelector(selectThreadToolUse); + const modesQuery = useGetChatModesQuery(undefined); + const currentMode = useAppSelector(selectThreadMode); const dispatch = useAppDispatch(); const defaultCap = caps.data?.chat_default_model ?? ""; - const selectedModel = useAppSelector(getSelectedChatModel); - const currentModel = selectedModel || defaultCap; + const modeInfo = useMemo(() => { + if (!modesQuery.data?.modes) return null; + return modesQuery.data.modes.find((m) => m.id === currentMode) ?? null; + }, [modesQuery.data?.modes, currentMode]); + + const modeRequiresTools = useMemo(() => { + if (!modeInfo) return false; + return modeInfo.tools_count > 0; + }, [modeInfo]); + + const modeRequiresAgent = useMemo(() => { + if (!modeInfo) return false; + return modeInfo.ui.tags.includes("editing"); + }, [modeInfo]); + const setCapModel = useCallback( (value: string) => { const model = @@ -75,42 +86,18 @@ export function useCapsForToolUse() { const usableModels = useMemo(() => { const models = caps.data?.chat_models ?? {}; - const items = Object.entries(models).reduce<string[]>( - (acc, [key, value]) => { - if (toolUse === "explore" && value.supports_tools) { - return [...acc, key]; - } - if (toolUse === "agent" && value.supports_agent) return [...acc, key]; - if (toolUse === "quick") return [...acc, key]; - return acc; - }, - [], - ); - return items; - }, [caps.data?.chat_models, toolUse]); + return Object.keys(models); + }, [caps.data?.chat_models]); const usableModelsForPlan = useMemo(() => { - // TODO: keep filtering logic for the future BYOK + Cloud (to show different providers) - // if (user.data?.inference !== "FREE") return usableModels; - // if (!usage.aboveUsageLimit && toolUse === "agent") return usableModels; return usableModels.map((model) => { - // if (!PAID_AGENT_LIST.includes(model)) return model; - return { value: model, disabled: false, - textValue: - // toolUse !== "agent" ? `${model} (Available in agent)` : undefined, - model, + textValue: model, }; }); - // return usableModels; - }, [ - // user.data?.inference, - usableModels, - // toolUse, - // usage.aboveUsageLimit, - ]); + }, [usableModels]); useEffect(() => { if (usableModelsForPlan.length > 0) { @@ -120,40 +107,16 @@ export function useCapsForToolUse() { const toChange = models.find((elem) => currentModel === elem) ?? 
models[0]; - setCapModel(toChange); + if (toChange && toChange !== currentModel) { + setCapModel(toChange); + } } }, [setCapModel, currentModel, usableModels, usableModelsForPlan]); useEffect(() => { - const determineNewToolUse = (): ToolUse | null => { - if (toolUse === "agent" && modelsSupportingAgent.length === 0) { - return "explore"; - } - if (toolUse === "explore" && modelsSupportingTools.length === 0) { - return "quick"; - } - return null; - }; - - const handleAutomaticToolUseChange = () => { - if (!caps.isSuccess || wasAdjusted) return; - - const newToolUse = determineNewToolUse(); - if (newToolUse) { - dispatch(setToolUse(newToolUse)); - } - setWasAdjusted(true); - }; - - handleAutomaticToolUseChange(); - }, [ - dispatch, - wasAdjusted, - caps.isSuccess, - toolUse, - modelsSupportingAgent, - modelsSupportingTools, - ]); + if (!caps.isSuccess || wasAdjusted) return; + setWasAdjusted(true); + }, [caps.isSuccess, wasAdjusted]); return { usableModels, @@ -164,5 +127,9 @@ export function useCapsForToolUse() { loading: !caps.data && (caps.isFetching || caps.isLoading), uninitialized: caps.isUninitialized, data: caps.data, + modelsSupportingTools, + modelsSupportingAgent, + modeRequiresTools, + modeRequiresAgent, }; } diff --git a/refact-agent/gui/src/hooks/useChatActions.ts b/refact-agent/gui/src/hooks/useChatActions.ts new file mode 100644 index 000000000..05527ce60 --- /dev/null +++ b/refact-agent/gui/src/hooks/useChatActions.ts @@ -0,0 +1,291 @@ +import { useCallback } from "react"; +import { useAppSelector } from "./useAppSelector"; +import { useAppDispatch } from "./useAppDispatch"; +import { selectLspPort, selectApiKey } from "../features/Config/configSlice"; +import { + selectChatId, + selectThread, + selectThreadImages, + selectSendImmediately, + selectMessages, +} from "../features/Chat/Thread/selectors"; +import { resetThreadImages, setSendImmediately } from "../features/Chat/Thread"; +import { buildThreadParamsPatch } from "../features/Chat/Thread/actions"; +import { + sendUserMessage, + retryFromIndex as retryFromIndexApi, + regenerate as regenerateApi, + updateChatParams, + abortGeneration, + respondToToolConfirmation, + respondToToolConfirmations, + updateMessage as updateMessageApi, + removeMessage as removeMessageApi, + cancelQueuedItem, + type MessageContent, +} from "../services/refact/chatCommands"; +import type { UserMessage } from "../services/refact/types"; + +type ContentItem = + | { type: "text"; text: string } + | { type: "image_url"; image_url: { url: string } }; + +function convertUserMessageContent( + newContent: UserMessage["content"], +): MessageContent { + if (typeof newContent === "string") { + return newContent; + } + if (!Array.isArray(newContent)) { + return ""; + } + const mapped: ContentItem[] = []; + for (const item of newContent) { + if ("type" in item) { + if (item.type === "text" && "text" in item) { + mapped.push({ type: "text", text: item.text }); + } else if ("image_url" in item) { + mapped.push({ type: "image_url", image_url: item.image_url }); + } + } else if ("m_type" in item && "m_content" in item) { + const { m_type, m_content } = item; + if (m_type === "text") { + mapped.push({ type: "text", text: String(m_content) }); + } else if (m_type.startsWith("image/")) { + mapped.push({ + type: "image_url", + image_url: { url: `data:${m_type};base64,${String(m_content)}` }, + }); + } + } + } + return mapped.length > 0 ? 
mapped : ""; +} + +export function useChatActions() { + const dispatch = useAppDispatch(); + const port = useAppSelector(selectLspPort); + const apiKey = useAppSelector(selectApiKey); + const chatId = useAppSelector(selectChatId); + const thread = useAppSelector(selectThread); + const attachedImages = useAppSelector(selectThreadImages); + const sendImmediately = useAppSelector(selectSendImmediately); + const messages = useAppSelector(selectMessages); + + /** + * Build message content with attached images if any. + */ + const buildMessageContent = useCallback( + (text: string): MessageContent => { + if (attachedImages.length === 0) { + return text; + } + + const imageContents: { type: "image_url"; image_url: { url: string } }[] = + []; + for (const img of attachedImages) { + if (typeof img.content === "string") { + imageContents.push({ + type: "image_url", + image_url: { url: img.content }, + }); + } + } + + if (imageContents.length === 0) { + return text; + } + + if (text.trim().length === 0) { + return imageContents; + } + + return [{ type: "text" as const, text }, ...imageContents]; + }, + [attachedImages], + ); + + const submit = useCallback( + async (question: string, priority?: boolean) => { + if (!chatId || !port) return; + + const content = buildMessageContent(question); + const isEmpty = + typeof content === "string" + ? content.trim().length === 0 + : content.length === 0; + if (isEmpty) return; + + if (messages.length === 0 && thread) { + const patch = buildThreadParamsPatch(thread, true); + if (Object.keys(patch).length > 0) { + await updateChatParams(chatId, patch, port, apiKey ?? undefined); + } + } + + const shouldPrioritize = priority ?? sendImmediately; + await sendUserMessage( + chatId, + content, + port, + apiKey ?? undefined, + shouldPrioritize, + ); + dispatch(resetThreadImages({ id: chatId })); + dispatch(setSendImmediately(false)); + }, + [ + chatId, + port, + apiKey, + buildMessageContent, + dispatch, + sendImmediately, + messages, + thread, + ], + ); + + /** + * Abort the current generation. + */ + const abort = useCallback(async () => { + if (!chatId || !port) return; + await abortGeneration(chatId, port, apiKey ?? undefined); + }, [chatId, port, apiKey]); + + /** + * Update chat parameters (model, mode, etc.). + */ + const setParams = useCallback( + async (params: { + model?: string; + mode?: string; + boost_reasoning?: boolean; + }) => { + if (!chatId || !port) return; + await updateChatParams(chatId, params, port, apiKey ?? undefined); + }, + [chatId, port, apiKey], + ); + + /** + * Respond to tool confirmation (accept or reject). + */ + const respondToTool = useCallback( + async (toolCallId: string, accepted: boolean) => { + if (!chatId || !port) return; + await respondToToolConfirmation( + chatId, + toolCallId, + accepted, + port, + apiKey ?? undefined, + ); + }, + [chatId, port, apiKey], + ); + + /** + * Respond to multiple tool confirmations at once (batch). + */ + const respondToTools = useCallback( + async (decisions: { tool_call_id: string; accepted: boolean }[]) => { + if (!chatId || !port || decisions.length === 0) return; + await respondToToolConfirmations( + chatId, + decisions, + port, + apiKey ?? undefined, + ); + }, + [chatId, port, apiKey], + ); + + /** + * Retry from a specific message index. + * This truncates all messages from the given index and sends a new user message. 
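+   * Content is normalized via convertUserMessageContent, so plain strings and
+   * multimodal (text / image_url) arrays are both accepted.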
+ */ + const retryFromIndex = useCallback( + async (index: number, newContent: UserMessage["content"]) => { + if (!chatId || !port) return; + + const content = convertUserMessageContent(newContent); + + await retryFromIndexApi( + chatId, + index, + content, + port, + apiKey ?? undefined, + ); + }, + [chatId, port, apiKey], + ); + + const updateMessage = useCallback( + async ( + messageId: string, + newContent: MessageContent, + regenerate?: boolean, + ) => { + if (!chatId || !port) return; + await updateMessageApi( + chatId, + messageId, + newContent, + port, + apiKey ?? undefined, + regenerate, + ); + }, + [chatId, port, apiKey], + ); + + const removeMessage = useCallback( + async (messageId: string, regenerate?: boolean) => { + if (!chatId || !port) return; + await removeMessageApi( + chatId, + messageId, + port, + apiKey ?? undefined, + regenerate, + ); + }, + [chatId, port, apiKey], + ); + + const regenerate = useCallback(async () => { + if (!chatId || !port) return; + await regenerateApi(chatId, port, apiKey ?? undefined); + }, [chatId, port, apiKey]); + + const cancelQueued = useCallback( + async (clientRequestId: string) => { + if (!chatId || !port) return false; + return cancelQueuedItem( + chatId, + clientRequestId, + port, + apiKey ?? undefined, + ); + }, + [chatId, port, apiKey], + ); + + return { + submit, + abort, + setParams, + respondToTool, + respondToTools, + retryFromIndex, + updateMessage, + removeMessage, + regenerate, + cancelQueued, + }; +} + +export default useChatActions; diff --git a/refact-agent/gui/src/hooks/useChatSubscription.ts b/refact-agent/gui/src/hooks/useChatSubscription.ts new file mode 100644 index 000000000..c4951a5ff --- /dev/null +++ b/refact-agent/gui/src/hooks/useChatSubscription.ts @@ -0,0 +1,388 @@ +import { useCallback, useEffect, useRef, useState } from "react"; +import { useAppDispatch } from "./useAppDispatch"; +import { useAppSelector } from "./useAppSelector"; +import { + applyChatEvent, + clearSseRefreshRequest, +} from "../features/Chat/Thread/actions"; +import { selectSseRefreshRequested } from "../features/Chat/Thread/selectors"; +import { selectLspPort, selectApiKey } from "../features/Config/configSlice"; +import { + subscribeToChatEvents, + type ChatEventEnvelope, +} from "../services/refact/chatSubscription"; + +const DEBUG = + typeof window !== "undefined" && + new URLSearchParams(window.location.search).has("debug"); + +export type ConnectionStatus = "disconnected" | "connecting" | "connected"; + +export type UseChatSubscriptionOptions = { + /** Enable subscription (default: true) */ + enabled?: boolean; + /** Reconnect on error (default: true) */ + autoReconnect?: boolean; + /** Reconnect delay in ms (default: 2000) */ + reconnectDelay?: number; + /** Callback when event received */ + onEvent?: (event: ChatEventEnvelope) => void; + /** Callback when connected */ + onConnected?: () => void; + /** Callback when disconnected */ + onDisconnected?: () => void; + /** Callback when error occurs */ + onError?: (error: Error) => void; +}; + +/** + * Hook for subscribing to chat events via SSE. 
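+ * Events are de-duplicated by their monotonic `seq`; when a gap is detected the
+ * connection is dropped and re-opened, and the next snapshot re-syncs `lastSeq`.
+ * Stream deltas are buffered and flushed on an adaptive timer (sized by the
+ * amount of streamed text) to reduce Redux dispatches during long generations.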
+ * + * @param chatId - Chat ID to subscribe to + * @param options - Configuration options + * @returns Connection status and control functions + */ +export function useChatSubscription( + chatId: string | null | undefined, + options: UseChatSubscriptionOptions = {}, +) { + const { + enabled = true, + autoReconnect = true, + reconnectDelay = 2000, + onEvent, + onConnected, + onDisconnected, + onError, + } = options; + + const dispatch = useAppDispatch(); + const port = useAppSelector(selectLspPort); + const apiKey = useAppSelector(selectApiKey); + + const [status, setStatus] = useState<ConnectionStatus>("disconnected"); + const [error, setError] = useState<Error | null>(null); + + const lastSeqRef = useRef<bigint>(0n); + const lastActivityAtRef = useRef<number>(0); + const callbacksRef = useRef({ + onEvent, + onConnected, + onDisconnected, + onError, + }); + callbacksRef.current = { onEvent, onConnected, onDisconnected, onError }; + + const STALE_THRESHOLD_MS = 45_000; + + const unsubscribeRef = useRef<(() => void) | null>(null); + const reconnectTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>( + null, + ); + const streamDeltaFlushRef = useRef<ReturnType<typeof setTimeout> | null>( + null, + ); + const pendingStreamDeltaRef = useRef<Extract< + ChatEventEnvelope, + { type: "stream_delta" } + > | null>(null); + const streamedBytesRef = useRef(0); + const connectingRef = useRef(false); + // eslint-disable-next-line @typescript-eslint/no-empty-function + const connectRef = useRef<() => void>(() => {}); + + const MAX_MERGED_DELTA_OPS = 256; + + // Adaptive flush thresholds (bytes of accumulated content) + const FLUSH_TIER_FAST_BYTES = 8_192; + const FLUSH_TIER_MEDIUM_BYTES = 200_000; + const FLUSH_MS_MEDIUM = 150; + const FLUSH_MS_SLOW = 500; + const MAX_BUFFERED_BYTES = 2_000_000; + + const clearStreamDeltaFlush = useCallback(() => { + const timerId = streamDeltaFlushRef.current; + if (timerId != null) { + clearTimeout(timerId); + streamDeltaFlushRef.current = null; + } + }, []); + + const flushPendingStreamDelta = useCallback(() => { + const pending = pendingStreamDeltaRef.current; + if (!pending) return; + pendingStreamDeltaRef.current = null; + dispatch(applyChatEvent(pending)); + callbacksRef.current.onEvent?.(pending); + }, [dispatch]); + + const scheduleStreamDeltaFlush = useCallback(() => { + if (streamDeltaFlushRef.current != null) return; + + const bytes = streamedBytesRef.current; + let delayMs: number; + if (bytes < FLUSH_TIER_FAST_BYTES) { + delayMs = 0; + } else if (bytes < FLUSH_TIER_MEDIUM_BYTES) { + delayMs = FLUSH_MS_MEDIUM; + } else { + delayMs = FLUSH_MS_SLOW; + } + + const flush = () => { + streamDeltaFlushRef.current = null; + flushPendingStreamDelta(); + }; + + streamDeltaFlushRef.current = setTimeout(flush, Math.max(delayMs, 0)); + }, [flushPendingStreamDelta]); + + const enqueueStreamDelta = useCallback( + (envelope: Extract<ChatEventEnvelope, { type: "stream_delta" }>) => { + for (const op of envelope.ops) { + if (op.op === "append_content" || op.op === "append_reasoning") { + streamedBytesRef.current += op.text.length; + } + } + + const pending = pendingStreamDeltaRef.current; + if (pending && pending.message_id === envelope.message_id) { + const mergedOpsLen = pending.ops.length + envelope.ops.length; + if (mergedOpsLen <= MAX_MERGED_DELTA_OPS) { + pending.seq = envelope.seq; + pending.ops.push(...envelope.ops); + } else { + flushPendingStreamDelta(); + pendingStreamDeltaRef.current = envelope; + } + } else { + flushPendingStreamDelta(); + 
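+        // A delta for a different message (or the very first delta) cannot be
+        // merged: flush whatever is pending, then start buffering this envelope.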
pendingStreamDeltaRef.current = envelope; + } + + if (streamedBytesRef.current > MAX_BUFFERED_BYTES) { + clearStreamDeltaFlush(); + flushPendingStreamDelta(); + return; + } + + scheduleStreamDeltaFlush(); + }, + [flushPendingStreamDelta, scheduleStreamDeltaFlush, clearStreamDeltaFlush], + ); + + const cleanup = useCallback(() => { + if (reconnectTimeoutRef.current) { + clearTimeout(reconnectTimeoutRef.current); + reconnectTimeoutRef.current = null; + } + clearStreamDeltaFlush(); + pendingStreamDeltaRef.current = null; + streamedBytesRef.current = 0; + if (unsubscribeRef.current) { + unsubscribeRef.current(); + unsubscribeRef.current = null; + } + connectingRef.current = false; + }, [clearStreamDeltaFlush]); + + const scheduleReconnect = useCallback( + (delayMs: number) => { + if (!autoReconnect || !enabled || !chatId || !port) return; + + if (reconnectTimeoutRef.current) { + clearTimeout(reconnectTimeoutRef.current); + } + + reconnectTimeoutRef.current = setTimeout(() => { + connectRef.current(); + }, delayMs); + }, + [autoReconnect, enabled, chatId, port], + ); + + const connect = useCallback(() => { + if (!chatId || !port || !enabled) return; + if (connectingRef.current) return; + + cleanup(); + connectingRef.current = true; + lastSeqRef.current = 0n; + setStatus("connecting"); + setError(null); + + unsubscribeRef.current = subscribeToChatEvents( + chatId, + port, + { + onEvent: (envelope) => { + try { + const seq = BigInt(envelope.seq); + if (envelope.type === "snapshot") { + if (DEBUG) { + // eslint-disable-next-line no-console + console.log( + "[SSE] Received snapshot event, seq:", + envelope.seq, + "messages:", + (envelope as { messages?: unknown[] }).messages?.length ?? + "?", + ); + } + streamedBytesRef.current = 0; + lastSeqRef.current = seq; + } else { + if (seq <= lastSeqRef.current) { + return; + } + if (seq > lastSeqRef.current + 1n) { + if (DEBUG) { + // eslint-disable-next-line no-console + console.log( + "[SSE] Sequence gap detected, reconnecting. Expected:", + (lastSeqRef.current + 1n).toString(), + "Got:", + envelope.seq, + ); + } + flushPendingStreamDelta(); + cleanup(); + setStatus("disconnected"); + scheduleReconnect(0); + return; + } + lastSeqRef.current = seq; + } + lastActivityAtRef.current = Date.now(); + if (envelope.type === "stream_delta") { + enqueueStreamDelta(envelope); + } else { + flushPendingStreamDelta(); + if (envelope.type === "stream_finished") { + streamedBytesRef.current = 0; + } + dispatch(applyChatEvent(envelope)); + callbacksRef.current.onEvent?.(envelope); + } + } catch (err) { + // Error processing event - likely malformed data + callbacksRef.current.onError?.( + err instanceof Error ? err : new Error(String(err)), + ); + } + }, + onConnected: () => { + connectingRef.current = false; + setStatus("connected"); + setError(null); + callbacksRef.current.onConnected?.(); + }, + onDisconnected: () => { + flushPendingStreamDelta(); + connectingRef.current = false; + setStatus("disconnected"); + callbacksRef.current.onDisconnected?.(); + scheduleReconnect(reconnectDelay); + }, + onError: (err) => { + flushPendingStreamDelta(); + connectingRef.current = false; + setStatus("disconnected"); + setError(err); + callbacksRef.current.onError?.(err); + cleanup(); + scheduleReconnect(reconnectDelay); + }, + }, + apiKey ?? 
undefined, + ); + }, [ + chatId, + port, + apiKey, + enabled, + cleanup, + enqueueStreamDelta, + flushPendingStreamDelta, + dispatch, + scheduleReconnect, + reconnectDelay, + ]); + + connectRef.current = connect; + + const disconnect = useCallback(() => { + cleanup(); + setStatus("disconnected"); + }, [cleanup]); + + const reconnect = useCallback(() => { + if (DEBUG) + console.log("[SSE] Manual reconnect triggered for chat:", chatId); // eslint-disable-line no-console + setTimeout(() => { + connect(); + }, 50); + }, [connect, chatId]); + + useEffect(() => { + if (chatId && enabled) { + connect(); + } else { + disconnect(); + } + + return cleanup; + }, [chatId, enabled, connect, disconnect, cleanup]); + + useEffect(() => { + if (status === "connected" && chatId && enabled) { + if (DEBUG) + console.log("[SSE] Port changed, reconnecting for chat:", chatId); // eslint-disable-line no-console + connect(); + } + }, [port]); // eslint-disable-line react-hooks/exhaustive-deps + + // Listen for SSE refresh requests (e.g., after trajectory transform) + const sseRefreshRequested = useAppSelector(selectSseRefreshRequested); + useEffect(() => { + if (sseRefreshRequested === chatId && enabled) { + // eslint-disable-next-line no-console + if (DEBUG) console.log("[SSE] Refresh requested for chat:", chatId); + dispatch(clearSseRefreshRequest()); + reconnect(); + } + }, [sseRefreshRequested, chatId, enabled, dispatch, reconnect]); + + useEffect(() => { + const handleVisibilityChange = () => { + if (document.visibilityState !== "visible") return; + if (!chatId || !enabled) return; + + const lastActivity = lastActivityAtRef.current; + const isStale = + lastActivity > 0 && Date.now() - lastActivity > STALE_THRESHOLD_MS; + + if (isStale && unsubscribeRef.current) { + reconnect(); + } + }; + + document.addEventListener("visibilitychange", handleVisibilityChange); + return () => { + document.removeEventListener("visibilitychange", handleVisibilityChange); + }; + }, [chatId, enabled, reconnect]); + + return { + status, + error, + lastSeq: lastSeqRef.current.toString(), + connect, + disconnect, + reconnect, + isConnected: status === "connected", + isConnecting: status === "connecting", + }; +} + +export default useChatSubscription; diff --git a/refact-agent/gui/src/hooks/useCheckpoints.ts b/refact-agent/gui/src/hooks/useCheckpoints.ts index 18dce9bdb..35fd22586 100644 --- a/refact-agent/gui/src/hooks/useCheckpoints.ts +++ b/refact-agent/gui/src/hooks/useCheckpoints.ts @@ -1,4 +1,5 @@ import { useCallback, useMemo } from "react"; +import type { RestoreMode } from "../features/Checkpoints/Checkpoints"; import { useAppSelector } from "./useAppSelector"; import { selectCheckpointsMessageIndex, @@ -20,6 +21,7 @@ import { newChatAction, selectChatId, selectMessages, + selectThreadMode, } from "../features/Chat"; import { isUserMessage, telemetryApi } from "../services/refact"; import { deleteChatById } from "../features/History/historySlice"; @@ -31,6 +33,7 @@ export const useCheckpoints = () => { const dispatch = useAppDispatch(); const messages = useAppSelector(selectMessages); const chatId = useAppSelector(selectChatId); + const chatMode = useAppSelector(selectThreadMode); const configIdeHost = useAppSelector(selectConfig).host; const [sendTelemetryEvent] = @@ -89,9 +92,15 @@ export const useCheckpoints = () => { if (!checkpoints) return; const amountOfUserMessages = messages.filter(isUserMessage); const firstUserMessage = amountOfUserMessages[0]; + // Capture chat_id and mode at click time to avoid race conditions + 
const currentChatId = chatId; + const currentChatMode = chatMode; try { - const previewedChanges = - await previewChangesFromCheckpoints(checkpoints).unwrap(); + const previewedChanges = await previewChangesFromCheckpoints( + checkpoints, + currentChatId, + currentChatMode, + ).unwrap(); void sendTelemetryEvent({ scope: `rollbackChanges/preview`, success: true, @@ -103,6 +112,8 @@ export const useCheckpoints = () => { ...previewedChanges, current_checkpoints: checkpoints, messageIndex, + chat_id: currentChatId, + chat_mode: currentChatMode, }), setIsCheckpointsPopupIsVisible(true), setShouldNewChatBeStarted( @@ -120,70 +131,90 @@ export const useCheckpoints = () => { }); } }, - [dispatch, previewChangesFromCheckpoints, sendTelemetryEvent, messages], + [ + dispatch, + previewChangesFromCheckpoints, + sendTelemetryEvent, + messages, + chatId, + chatMode, + ], ); - const handleFix = useCallback(async () => { - try { - const response = await restoreChangesFromCheckpoints( - latestRestoredCheckpointsResult.current_checkpoints, - ).unwrap(); - if (response.success) { - void sendTelemetryEvent({ - scope: `rollbackChanges/confirmed`, - success: true, - error_message: "", - }); - - if (configIdeHost === "jetbrains") { - const files = - latestRestoredCheckpointsResult.reverted_changes.flatMap( - (change) => change.files_changed, - ); - files.forEach((file) => { - setForceReloadFileByPath(file.absolute_path); + const handleFix = useCallback( + async (restoreMode: RestoreMode = "files_and_messages") => { + try { + // Use chat_id and mode stored at preview time, not current state + const response = await restoreChangesFromCheckpoints( + latestRestoredCheckpointsResult.current_checkpoints, + latestRestoredCheckpointsResult.chat_id, + latestRestoredCheckpointsResult.chat_mode, + ).unwrap(); + if (response.success) { + void sendTelemetryEvent({ + scope: `rollbackChanges/confirmed`, + success: true, + error_message: "", }); + + if (configIdeHost === "jetbrains") { + const files = + latestRestoredCheckpointsResult.reverted_changes.flatMap( + (change) => change.files_changed, + ); + files.forEach((file) => { + setForceReloadFileByPath(file.absolute_path); + }); + } + + dispatch(setIsCheckpointsPopupIsVisible(false)); + } else { + dispatch(setCheckpointsErrorLog(response.error_log)); + return; } - dispatch(setIsCheckpointsPopupIsVisible(false)); - } else { - dispatch(setCheckpointsErrorLog(response.error_log)); - return; - } - if (shouldNewChatBeStarted || !maybeMessageIndex) { - const actions = [newChatAction(), deleteChatById(chatId)]; - actions.forEach((action) => dispatch(action)); - } else { - const usefulMessages = messages.slice(0, maybeMessageIndex); - dispatch( - backUpMessages({ - id: chatId, - messages: usefulMessages, - }), - ); + // Only undo messages if restoreMode is "files_and_messages" + if (restoreMode === "files_and_messages") { + if (shouldNewChatBeStarted || !maybeMessageIndex) { + const actions = [newChatAction(), deleteChatById(chatId)]; + actions.forEach((action) => dispatch(action)); + } else { + const usefulMessages = messages.slice(0, maybeMessageIndex); + dispatch( + backUpMessages({ + id: chatId, + messages: usefulMessages, + }), + ); + } + } + // If restoreMode is "files_only", we don't touch the messages + } catch (error) { + void sendTelemetryEvent({ + scope: `rollbackChanges/failed`, + success: false, + error_message: `rollback: failed to apply previewed changes from checkpoints. 
checkpoints: ${JSON.stringify( + latestRestoredCheckpointsResult.current_checkpoints, + )}`, + }); } - } catch (error) { - void sendTelemetryEvent({ - scope: `rollbackChanges/failed`, - success: false, - error_message: `rollback: failed to apply previewed changes from checkpoints. checkpoints: ${JSON.stringify( - latestRestoredCheckpointsResult.current_checkpoints, - )}`, - }); - } - }, [ - dispatch, - sendTelemetryEvent, - setForceReloadFileByPath, - restoreChangesFromCheckpoints, - configIdeHost, - shouldNewChatBeStarted, - maybeMessageIndex, - chatId, - messages, - latestRestoredCheckpointsResult.current_checkpoints, - latestRestoredCheckpointsResult.reverted_changes, - ]); + }, + [ + dispatch, + sendTelemetryEvent, + setForceReloadFileByPath, + restoreChangesFromCheckpoints, + configIdeHost, + shouldNewChatBeStarted, + maybeMessageIndex, + chatId, + messages, + latestRestoredCheckpointsResult.current_checkpoints, + latestRestoredCheckpointsResult.reverted_changes, + latestRestoredCheckpointsResult.chat_id, + latestRestoredCheckpointsResult.chat_mode, + ], + ); return { shouldCheckpointsPopupBeShown, diff --git a/refact-agent/gui/src/hooks/useCompressChat.ts b/refact-agent/gui/src/hooks/useCompressChat.ts deleted file mode 100644 index f539c0483..000000000 --- a/refact-agent/gui/src/hooks/useCompressChat.ts +++ /dev/null @@ -1,50 +0,0 @@ -import { useCallback } from "react"; -import { selectThread } from "../features/Chat/Thread/selectors"; -import { useAppSelector } from "./useAppSelector"; -import { ChatMessages, knowledgeApi } from "../services/refact"; -import { newChatAction } from "../events"; -import { useAppDispatch } from "./useAppDispatch"; -import { setError } from "../features/Errors/errorsSlice"; -import { setIsWaitingForResponse, setSendImmediately } from "../features/Chat"; - -export function useCompressChat() { - const dispatch = useAppDispatch(); - const thread = useAppSelector(selectThread); - - const [submit, request] = knowledgeApi.useCompressMessagesMutation({ - fixedCacheKey: thread.id, - }); - - const compressChat = useCallback(async () => { - dispatch(setIsWaitingForResponse(true)); - const result = await submit({ - messages: thread.messages, - project: thread.project_name ?? "", - }); - dispatch(setIsWaitingForResponse(false)); - - if (result.error) { - // TODO: handle errors - dispatch( - setError("Error compressing chat: " + JSON.stringify(result.error)), - ); - } - - if (result.data) { - const content = - "🗜️ I am continuing from a compressed chat history. 
Here is what happened so far: " + - result.data.trajectory; - const messages: ChatMessages = [{ role: "user", content }]; - - const action = newChatAction({ messages, title: `🗜️ ${thread.title}` }); - dispatch(action); - dispatch(setSendImmediately(true)); - } - }, [dispatch, submit, thread.messages, thread.project_name, thread.title]); - - return { - compressChat, - compressChatRequest: request, - isCompressing: request.isLoading, - }; -} diff --git a/refact-agent/gui/src/hooks/useCompressionStop.ts b/refact-agent/gui/src/hooks/useCompressionStop.ts deleted file mode 100644 index 6bcbb4c85..000000000 --- a/refact-agent/gui/src/hooks/useCompressionStop.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { useEffect, useCallback, useMemo } from "react"; -import { useAppSelector } from "./useAppSelector"; -import { useAppDispatch } from "./useAppDispatch"; -import { - selectChatId, - selectLastSentCompression, - selectMessages, - setIsNewChatSuggested, - setIsNewChatSuggestionRejected, - setPreventSend, -} from "../features/Chat"; -import { takeFromEndWhile } from "../utils"; -import { isUserMessage } from "../events"; - -export function useLastSentCompressionStop() { - const dispatch = useAppDispatch(); - const lastSentCompression = useAppSelector(selectLastSentCompression); - const messages = useAppSelector(selectMessages); - const chatId = useAppSelector(selectChatId); - - const messagesFromLastUserMessage = useMemo(() => { - return takeFromEndWhile(messages, (message) => !isUserMessage(message)) - .length; - }, [messages]); - - useEffect(() => { - if ( - lastSentCompression && - lastSentCompression !== "absent" && - messagesFromLastUserMessage >= 40 - ) { - dispatch(setPreventSend({ id: chatId })); - dispatch(setIsNewChatSuggested({ chatId, value: true })); - } - }, [chatId, dispatch, lastSentCompression, messagesFromLastUserMessage]); - - const resume = useCallback(() => { - dispatch(setIsNewChatSuggestionRejected({ chatId, value: true })); - }, [chatId, dispatch]); - - return { resume, strength: lastSentCompression }; -} diff --git a/refact-agent/gui/src/hooks/useDraftMessage.ts b/refact-agent/gui/src/hooks/useDraftMessage.ts new file mode 100644 index 000000000..007f2e51c --- /dev/null +++ b/refact-agent/gui/src/hooks/useDraftMessage.ts @@ -0,0 +1,68 @@ +import { useState, useEffect, useCallback, useRef } from "react"; +import { useAppSelector } from "./useAppSelector"; +import { selectChatId } from "../features/Chat"; +import { + saveDraftMessage, + getDraftMessage, + clearDraftMessage, +} from "../utils/threadStorage"; +import { useDebounceCallback } from "usehooks-ts"; + +export function useDraftMessage() { + const chatId = useAppSelector(selectChatId); + const [value, setValueInternal] = useState<string>(() => { + if (chatId) { + return getDraftMessage(chatId); + } + return ""; + }); + const prevChatIdRef = useRef<string>(chatId); + const isInitialMount = useRef<boolean>(true); + + const debouncedSave = useDebounceCallback((id: string, content: string) => { + saveDraftMessage(id, content); + }, 500); + + useEffect(() => { + return () => { + debouncedSave.flush(); + }; + }, [debouncedSave]); + + useEffect(() => { + if (isInitialMount.current) { + isInitialMount.current = false; + return; + } + + if (chatId && chatId !== prevChatIdRef.current) { + debouncedSave.flush(); + + const draft = getDraftMessage(chatId); + setValueInternal(draft); + prevChatIdRef.current = chatId; + } + }, [chatId, debouncedSave]); + + const setValue = useCallback( + (newValue: string | ((prev: string) => string)) => 
{ + setValueInternal((prev) => { + const next = typeof newValue === "function" ? newValue(prev) : newValue; + if (chatId) { + debouncedSave(chatId, next); + } + return next; + }); + }, + [chatId, debouncedSave], + ); + + const clearDraft = useCallback(() => { + if (chatId) { + clearDraftMessage(chatId); + setValueInternal(""); + } + }, [chatId]); + + return { value, setValue, clearDraft }; +} diff --git a/refact-agent/gui/src/hooks/useEnsureSubscriptionConnected.ts b/refact-agent/gui/src/hooks/useEnsureSubscriptionConnected.ts new file mode 100644 index 000000000..908f36484 --- /dev/null +++ b/refact-agent/gui/src/hooks/useEnsureSubscriptionConnected.ts @@ -0,0 +1,86 @@ +import { useCallback, useRef } from "react"; +import { useAppDispatch } from "./useAppDispatch"; +import { useAppSelector } from "./useAppSelector"; +import { selectSnapshotReceivedById } from "../features/Chat/Thread/selectors"; +import { requestSseRefresh } from "../features/Chat/Thread/actions"; +import { store } from "../app/store"; + +function waitUntil( + predicate: () => boolean, + timeoutMs: number, + intervalMs: number, +): Promise<void> { + return new Promise((resolve) => { + if (predicate()) { + resolve(); + return; + } + + const start = Date.now(); + const interval = setInterval(() => { + if (predicate() || Date.now() - start >= timeoutMs) { + clearInterval(interval); + resolve(); + } + }, intervalMs); + }); +} + +export function useEnsureSubscriptionConnected( + chatId: string | null | undefined, +) { + const dispatch = useAppDispatch(); + + const chatIdRef = useRef(chatId); + chatIdRef.current = chatId; + + const hasSnapshot = useCallback(() => { + const id = chatIdRef.current; + if (!id) return true; + return selectSnapshotReceivedById(store.getState(), id); + }, []); + + const hasCachedMessages = useCallback(() => { + const id = chatIdRef.current; + if (!id) return true; + const rt = store.getState().chat.threads[id]; + return !!rt && rt.thread.messages.length > 0; + }, []); + + const pendingRef = useRef<Promise<void> | null>(null); + + const ensureConnected = useCallback(async (): Promise<void> => { + const targetChatId = chatIdRef.current; + if (!targetChatId) return; + + if (hasSnapshot() || hasCachedMessages()) return; + + dispatch(requestSseRefresh({ chatId: targetChatId })); + + if (!pendingRef.current) { + pendingRef.current = waitUntil( + () => + chatIdRef.current !== targetChatId || + hasSnapshot() || + hasCachedMessages(), + 5000, + 100, + ).finally(() => { + pendingRef.current = null; + }); + } + + await pendingRef.current; + }, [dispatch, hasSnapshot, hasCachedMessages]); + + const isConnected = useAppSelector((state) => { + if (!chatId) return true; + return selectSnapshotReceivedById(state, chatId); + }); + + return { + ensureConnected, + isConnected, + isConnecting: !isConnected, + }; +} diff --git a/refact-agent/gui/src/hooks/useEventBusForApp.ts b/refact-agent/gui/src/hooks/useEventBusForApp.ts index 269201c55..4c2780bd6 100644 --- a/refact-agent/gui/src/hooks/useEventBusForApp.ts +++ b/refact-agent/gui/src/hooks/useEventBusForApp.ts @@ -1,4 +1,4 @@ -import { useEffect } from "react"; +import { useEffect, useRef } from "react"; import { useAppSelector } from "./useAppSelector"; import { useAppDispatch } from "./useAppDispatch"; import { useConfig } from "./useConfig"; @@ -6,14 +6,18 @@ import { updateConfig } from "../features/Config/configSlice"; import { setFileInfo } from "../features/Chat/activeFile"; import { setSelectedSnippet } from "../features/Chat/selectedSnippet"; import { 
setCurrentProjectInfo } from "../features/Chat/currentProject"; -import { newChatAction } from "../features/Chat/Thread/actions"; +import { + newChatAction, + newChatWithInitialMessages, +} from "../features/Chat/Thread/actions"; import { isPageInHistory, push, selectPages, } from "../features/Pages/pagesSlice"; -import { ideToolCallResponse } from "./useEventBusForIDE"; +import { ideToolCallResponse, ideSwitchToThread } from "./useEventBusForIDE"; import { createAction } from "@reduxjs/toolkit/react"; +import { switchToThread } from "../features/Chat/Thread/actions"; export const ideAttachFileToChat = createAction<string>("ide/attachFileToChat"); @@ -21,6 +25,8 @@ export function useEventBusForApp() { const config = useConfig(); const dispatch = useAppDispatch(); const pages = useAppSelector(selectPages); + const pagesRef = useRef(pages); + pagesRef.current = pages; useEffect(() => { const listener = (event: MessageEvent) => { @@ -37,10 +43,20 @@ export function useEventBusForApp() { } if (newChatAction.match(event.data)) { - if (!isPageInHistory({ pages }, "chat")) { + if (!isPageInHistory({ pages: pagesRef.current }, "chat")) { dispatch(push({ name: "chat" })); } - dispatch(newChatAction(event.data.payload)); + const payload = event.data.payload; + if (payload?.messages && payload.messages.length > 0) { + void dispatch( + newChatWithInitialMessages({ + title: payload.title, + messages: payload.messages, + }), + ); + } else { + dispatch(newChatAction(payload)); + } } if (setCurrentProjectInfo.match(event.data)) { @@ -51,6 +67,13 @@ export function useEventBusForApp() { dispatch(event.data); } + if (ideSwitchToThread.match(event.data)) { + if (!isPageInHistory({ pages: pagesRef.current }, "chat")) { + dispatch(push({ name: "chat" })); + } + dispatch(switchToThread({ id: event.data.payload.chatId })); + } + // TODO: ideToolEditResponse. 
// TODO: active project @@ -63,5 +86,5 @@ export function useEventBusForApp() { return () => { window.removeEventListener("message", listener); }; - }, [config.host, dispatch, pages]); + }, [config.host, dispatch]); } diff --git a/refact-agent/gui/src/hooks/useEventBusForIDE.ts b/refact-agent/gui/src/hooks/useEventBusForIDE.ts index 75f1d48ac..04a35094b 100644 --- a/refact-agent/gui/src/hooks/useEventBusForIDE.ts +++ b/refact-agent/gui/src/hooks/useEventBusForIDE.ts @@ -83,6 +83,28 @@ export const ideClearActiveTeamsGroup = createAction<undefined>( "ide/clearActiveTeamsGroup", ); +export const ideTaskDone = createAction<{ + chatId: string; + toolCallId: string; + summary: string; + knowledgePath?: string; +}>("ide/taskDone"); + +export const ideAskQuestions = createAction<{ + chatId: string; + toolCallId: string; + questions: { + id: string; + type: string; + text: string; + options?: string[]; + }[]; +}>("ide/askQuestions"); + +export const ideSwitchToThread = createAction<{ + chatId: string; +}>("ide/switchToThread"); + export const useEventsBusForIDE = () => { const [sendTelemetryEvent] = telemetryApi.useLazySendTelemetryChatEventQuery(); @@ -294,6 +316,41 @@ export const useEventsBusForIDE = () => { postMessage(action); }, [postMessage]); + const notifyTaskDone = useCallback( + ( + chatId: string, + toolCallId: string, + summary: string, + knowledgePath?: string, + ) => { + const action = ideTaskDone({ + chatId, + toolCallId, + summary, + knowledgePath, + }); + postMessage(action); + }, + [postMessage], + ); + + const notifyAskQuestions = useCallback( + ( + chatId: string, + toolCallId: string, + questions: { + id: string; + type: string; + text: string; + options?: string[]; + }[], + ) => { + const action = ideAskQuestions({ chatId, toolCallId, questions }); + postMessage(action); + }, + [postMessage], + ); + return { diffPasteBack, openSettings, @@ -318,5 +375,7 @@ export const useEventsBusForIDE = () => { setLoginMessage, setActiveTeamsGroupInIDE, clearActiveTeamsGroupInIDE, + notifyTaskDone, + notifyAskQuestions, }; }; diff --git a/refact-agent/gui/src/hooks/useEventBusForWeb.ts b/refact-agent/gui/src/hooks/useEventBusForWeb.ts index 6ac86129c..10c68edd6 100644 --- a/refact-agent/gui/src/hooks/useEventBusForWeb.ts +++ b/refact-agent/gui/src/hooks/useEventBusForWeb.ts @@ -1,4 +1,4 @@ -import { useEffect } from "react"; +import { useEffect, useRef } from "react"; import { useLocalStorage } from "usehooks-ts"; import { isLogOut, isOpenExternalUrl, isSetupHost } from "../events/setup"; import { useAppDispatch } from "./useAppDispatch"; @@ -12,6 +12,10 @@ export function useEventBusForWeb() { const [addressURL, setAddressURL] = useLocalStorage("lspUrl", ""); const [apiKey, setApiKey] = useLocalStorage("apiKey", ""); const dispatch = useAppDispatch(); + const addressURLRef = useRef(addressURL); + const apiKeyRef = useRef(apiKey); + addressURLRef.current = addressURL; + apiKeyRef.current = apiKey; useEffect(() => { if (config.host !== "web") { @@ -30,23 +34,25 @@ export function useEventBusForWeb() { if (isSetupHost(event.data)) { const { host } = event.data.payload; - if (host.type === "cloud") { - setAddressURL("Refact"); - setApiKey(host.apiKey); - } else if (host.type === "self") { - setAddressURL(host.endpointAddress); - setApiKey("any-will-work-for-local-server"); - } else { - setAddressURL(host.endpointAddress); - setApiKey(host.apiKey); - } - dispatch(updateConfig({ addressURL, apiKey })); + setAddressURL("Refact"); + setApiKey(host.apiKey); + dispatch( + updateConfig({ + 
addressURL: addressURLRef.current, + apiKey: apiKeyRef.current, + }), + ); } if (isLogOut(event.data)) { setAddressURL(""); setApiKey(""); - dispatch(updateConfig({ addressURL, apiKey })); + dispatch( + updateConfig({ + addressURL: addressURLRef.current, + apiKey: apiKeyRef.current, + }), + ); } }; @@ -55,7 +61,7 @@ export function useEventBusForWeb() { return () => { window.removeEventListener("message", listener); }; - }, [setApiKey, setAddressURL, config.host, dispatch, addressURL, apiKey]); + }, [setApiKey, setAddressURL, config.host, dispatch]); useEffect(() => { if (config.host !== "web") { diff --git a/refact-agent/gui/src/hooks/useGetPing.ts b/refact-agent/gui/src/hooks/useGetPing.ts index 191ca2ce8..bccc7c64c 100644 --- a/refact-agent/gui/src/hooks/useGetPing.ts +++ b/refact-agent/gui/src/hooks/useGetPing.ts @@ -3,11 +3,17 @@ import { useState, useEffect } from "react"; import { selectConfig } from "../features/Config/configSlice"; import { pingApi } from "../services/refact"; import { useAppSelector } from "./useAppSelector"; +import { useAppDispatch } from "./useAppDispatch"; +import { setBackendStatus } from "../features/Connection"; + +const POLL_INTERVAL_HEALTHY = 5000; +const POLL_INTERVAL_ERROR = 2000; export const useGetPing = () => { + const dispatch = useAppDispatch(); const currentLspPort = useAppSelector(selectConfig).lspPort; - const [pollingInterval, setPollingInterval] = useState(1000); + const [pollingInterval, setPollingInterval] = useState(POLL_INTERVAL_ERROR); const [queryStarted, setQueryStarted] = useState(false); const result = pingApi.endpoints.ping.useQuery(currentLspPort, { @@ -21,20 +27,33 @@ export const useGetPing = () => { } }, [result.requestId, queryStarted]); - // Effect to manage polling based on query status useEffect(() => { if (result.isUninitialized && queryStarted) { - setPollingInterval(1000); + setPollingInterval(POLL_INTERVAL_ERROR); setQueryStarted(false); } else if (result.isSuccess) { - setPollingInterval(0); + setPollingInterval(POLL_INTERVAL_HEALTHY); + dispatch(setBackendStatus({ status: "online" })); } else if (result.isError) { - setPollingInterval(1000); + setPollingInterval(POLL_INTERVAL_ERROR); + const err = result.error as Record<string, unknown> | undefined; + const errorMsg = + err && typeof err === "object" && "message" in err + ? 
String(err.message) + : "Connection failed"; + dispatch(setBackendStatus({ status: "offline", error: errorMsg })); } - }, [result.isSuccess, result.isError, result.isUninitialized, queryStarted]); + }, [ + result.isSuccess, + result.isError, + result.isUninitialized, + result.error, + queryStarted, + dispatch, + ]); useEffect(() => { - setPollingInterval(1000); + setPollingInterval(POLL_INTERVAL_ERROR); setQueryStarted(false); }, [currentLspPort]); diff --git a/refact-agent/gui/src/hooks/useGetUser.ts b/refact-agent/gui/src/hooks/useGetUser.ts index 885ec01be..fe175f4d0 100644 --- a/refact-agent/gui/src/hooks/useGetUser.ts +++ b/refact-agent/gui/src/hooks/useGetUser.ts @@ -29,7 +29,7 @@ export const useGetUser = () => { ) || isStreaming || (supportsMetadata !== undefined && !supportsMetadata), // if it's enterprise, then skipping this request - pollingInterval: 60 * 60 * 1000, // 1 hour + pollingInterval: 5 * 60 * 1000, }, ); diff --git a/refact-agent/gui/src/hooks/useGoToLink.ts b/refact-agent/gui/src/hooks/useGoToLink.ts index d633f4550..e7a6e1293 100644 --- a/refact-agent/gui/src/hooks/useGoToLink.ts +++ b/refact-agent/gui/src/hooks/useGoToLink.ts @@ -4,15 +4,22 @@ import { isAbsolutePath } from "../utils/isAbsolutePath"; import { useAppDispatch } from "./useAppDispatch"; import { popBackTo, push } from "../features/Pages/pagesSlice"; import { useAppSelector } from "./useAppSelector"; -import { selectIntegration } from "../features/Chat/Thread/selectors"; +import { + selectIntegration, + selectChatId, +} from "../features/Chat/Thread/selectors"; import { debugIntegrations } from "../debugConfig"; -import { newChatAction } from "../features/Chat/Thread/actions"; -import { clearPauseReasonsAndHandleToolsStatus } from "../features/ToolConfirmation/confirmationSlice"; +import { + newChatAction, + clearThreadPauseReasons, + setThreadConfirmationStatus, +} from "../features/Chat/Thread/actions"; export function useGoToLink() { const dispatch = useAppDispatch(); const { queryPathThenOpenFile } = useEventsBusForIDE(); const maybeIntegration = useAppSelector(selectIntegration); + const chatId = useAppSelector(selectChatId); const handleGoTo = useCallback( ({ goto }: { goto?: string }) => { @@ -55,8 +62,10 @@ export function useGoToLink() { case "newchat": { dispatch(newChatAction()); + dispatch(clearThreadPauseReasons({ id: chatId })); dispatch( - clearPauseReasonsAndHandleToolsStatus({ + setThreadConfirmationStatus({ + id: chatId, wasInteracted: false, confirmationStatus: true, }), @@ -72,15 +81,7 @@ export function useGoToLink() { } } }, - [ - dispatch, - // maybeIntegration?.name, - // maybeIntegration?.path, - // maybeIntegration?.project, - // maybeIntegration?.shouldIntermediatePageShowUp, - maybeIntegration, - queryPathThenOpenFile, - ], + [dispatch, chatId, maybeIntegration, queryPathThenOpenFile], ); return { handleGoTo }; diff --git a/refact-agent/gui/src/hooks/useLinksFromLsp.ts b/refact-agent/gui/src/hooks/useLinksFromLsp.ts index a54cc2db0..098178bbc 100644 --- a/refact-agent/gui/src/hooks/useLinksFromLsp.ts +++ b/refact-agent/gui/src/hooks/useLinksFromLsp.ts @@ -9,9 +9,8 @@ import { import { useAppDispatch } from "./useAppDispatch"; import { useAppSelector } from "./useAppSelector"; import { useGetCapsQuery } from "./useGetCapsQuery"; -import { useSendChatRequest } from "./useSendChatRequest"; +import { useChatActions } from "./useChatActions"; import { - chatModeToLspMode, selectAreFollowUpsEnabled, selectChatId, selectIntegration, @@ -24,6 +23,7 @@ import { setIntegrationData, 
setIsNewChatSuggested, } from "../features/Chat"; +import { DEFAULT_MODE } from "../features/Chat/Thread/types"; import { useGoToLink } from "./useGoToLink"; import { setError } from "../features/Errors/errorsSlice"; import { setInformation } from "../features/Errors/informationSlice"; @@ -84,7 +84,7 @@ export function useGetLinksFromLsp() { chat_id: chatId, messages, model: model ?? "", - mode: chatModeToLspMode({ defaultMode: threadMode }), + mode: threadMode ?? DEFAULT_MODE, current_config_file: maybeIntegration?.path, }, { skip: skipLinksRequest }, @@ -107,7 +107,7 @@ export function useGetLinksFromLsp() { export function useLinksFromLsp() { const dispatch = useAppDispatch(); const { handleGoTo } = useGoToLink(); - const { submit } = useSendChatRequest(); + const { submit, setParams } = useChatActions(); const [applyCommit, _applyCommitResult] = linksApi.useSendCommitMutation(); @@ -202,20 +202,17 @@ export function useLinksFromLsp() { } if (link.link_action === "follow-up") { - submit({ - question: link.link_text, - }); + void submit(link.link_text); return; } if (link.link_action === "summarize-project") { if ("link_summary_path" in link && link.link_summary_path) { dispatch(setIntegrationData({ path: link.link_summary_path })); - // set the integration data } - submit({ - question: link.link_text, - maybeMode: "PROJECT_SUMMARY", + // Set mode then send message + void setParams({ mode: "PROJECT_SUMMARY" }).then(() => { + void submit(link.link_text); }); return; } @@ -223,9 +220,6 @@ export function useLinksFromLsp() { // TBD: It should be safe to remove this now? if (link.link_action === "regenerate-with-increased-context-size") { dispatch(setIncreaseMaxTokens(true)); - submit({ - maybeDropLastMessage: true, - }); return; } @@ -264,20 +258,25 @@ export function useLinksFromLsp() { path: link.link_payload.chat_meta.current_config_file, }), ); - // should stop recommending integrations link be opening a chat? - // maybe it's better to do something similar to commit link, by calling endpoint in the LSP debugRefact(`[DEBUG]: link messages: `, link.link_payload.messages); - submit({ - maybeMode: link.link_payload.chat_meta.chat_mode, - maybeMessages: link.link_payload.messages, - }); + const lastMsg = + link.link_payload.messages[link.link_payload.messages.length - 1]; + if (lastMsg.role === "user") { + const content = + typeof lastMsg.content === "string" ? 
lastMsg.content : ""; + void setParams({ mode: link.link_payload.chat_meta.chat_mode }).then( + () => { + void submit(content); + }, + ); + } return; } // eslint-disable-next-line no-console console.warn(`unknown action: ${JSON.stringify(link)}`); }, - [applyCommit, dispatch, handleGoTo, sendTelemetryEvent, submit], + [applyCommit, dispatch, handleGoTo, sendTelemetryEvent, submit, setParams], ); const linksResult = useGetLinksFromLsp(); diff --git a/refact-agent/gui/src/hooks/useLoadMoreHistory.ts b/refact-agent/gui/src/hooks/useLoadMoreHistory.ts new file mode 100644 index 000000000..19749e46c --- /dev/null +++ b/refact-agent/gui/src/hooks/useLoadMoreHistory.ts @@ -0,0 +1,66 @@ +import { useCallback, useState, useRef } from "react"; +import { useAppDispatch } from "./useAppDispatch"; +import { useAppSelector } from "./useAppSelector"; +import { trajectoriesApi } from "../services/refact/trajectories"; +import { + hydrateHistoryFromMeta, + setPagination, +} from "../features/History/historySlice"; + +export function useLoadMoreHistory() { + const dispatch = useAppDispatch(); + const pagination = useAppSelector((state) => state.history.pagination); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState<string | null>(null); + const loadingRef = useRef(false); + const cursorRef = useRef<string | null>(null); + + const loadMore = useCallback(async () => { + if (loadingRef.current || !pagination.hasMore) return; + if (!pagination.cursor) return; + if (cursorRef.current === pagination.cursor) return; + + loadingRef.current = true; + cursorRef.current = pagination.cursor; + setIsLoading(true); + setError(null); + + try { + const result = await dispatch( + trajectoriesApi.endpoints.listTrajectoriesPaginated.initiate( + { + limit: 50, + cursor: pagination.cursor, + }, + { forceRefetch: true }, + ), + ).unwrap(); + + dispatch(hydrateHistoryFromMeta(result.items)); + dispatch( + setPagination({ + cursor: result.next_cursor, + hasMore: result.has_more, + }), + ); + } catch (err) { + setError(err instanceof Error ? 
err.message : "Failed to load more"); + } finally { + loadingRef.current = false; + setIsLoading(false); + } + }, [dispatch, pagination.hasMore, pagination.cursor]); + + const retry = useCallback(() => { + setError(null); + cursorRef.current = null; + }, []); + + return { + loadMore, + retry, + isLoading, + hasMore: pagination.hasMore, + error, + }; +} diff --git a/refact-agent/gui/src/hooks/useLogin.ts b/refact-agent/gui/src/hooks/useLogin.ts index 9c70e31ab..8324b2553 100644 --- a/refact-agent/gui/src/hooks/useLogin.ts +++ b/refact-agent/gui/src/hooks/useLogin.ts @@ -10,6 +10,8 @@ import { } from "../features/Config/configSlice"; import { useOpenUrl } from "./useOpenUrl"; import { useEventsBusForIDE } from "./useEventBusForIDE"; +import { push } from "../features/Pages/pagesSlice"; +import { newChatAction } from "../features/Chat"; function makeTicket() { return ( @@ -28,9 +30,11 @@ export const useEmailLogin = () => { const [aborted, setAborted] = useState<boolean>(false); const [timeoutN, setTimeoutN] = useState<NodeJS.Timeout>(); const abortRef = useRef<() => void>(() => ({})); + const hasNavigated = useRef(false); const emailLogin = useCallback( (email: string) => { + hasNavigated.current = false; const token = makeTicket(); const action = emailLoginTrigger({ email, token }); abortRef.current = () => action.abort(); @@ -51,7 +55,12 @@ export const useEmailLogin = () => { abortRef.current = () => action.abort(); }, 5000); setTimeoutN(timer); - } else if (args && emailLoginResult.data?.status === "user_logged_in") { + } else if ( + args && + emailLoginResult.data?.status === "user_logged_in" && + !hasNavigated.current + ) { + hasNavigated.current = true; dispatch(setApiKey(emailLoginResult.data.key)); dispatch(setAddressURL("Refact")); setupHost({ @@ -59,6 +68,9 @@ export const useEmailLogin = () => { apiKey: emailLoginResult.data.key, userName: args.email, }); + dispatch(push({ name: "history" })); + dispatch(newChatAction()); + dispatch(push({ name: "chat" })); } }, [aborted, dispatch, emailLoginResult, emailLoginTrigger, setupHost]); @@ -92,6 +104,7 @@ export const useLogin = () => { const { setupHost } = useEventsBusForIDE(); const dispatch = useAppDispatch(); const abortRef = useRef<() => void>(() => ({})); + const hasNavigated = useRef(false); const host = useAppSelector(selectHost); const openUrl = useOpenUrl(); @@ -100,6 +113,7 @@ export const useLogin = () => { const loginWithProvider = useCallback( (provider: "google" | "github") => { + hasNavigated.current = false; const ticket = makeTicket(); const baseUrl = new URL(`https://refact.smallcloud.ai/authentication`); baseUrl.searchParams.set("token", ticket); @@ -116,19 +130,20 @@ export const useLogin = () => { ); useEffect(() => { - if (isGoodResponse(loginPollingResult.data)) { - const actions = [ - setApiKey(loginPollingResult.data.secret_key), - setAddressURL("Refact"), - ]; - - actions.forEach((action) => dispatch(action)); + if (isGoodResponse(loginPollingResult.data) && !hasNavigated.current) { + hasNavigated.current = true; + dispatch(setApiKey(loginPollingResult.data.secret_key)); + dispatch(setAddressURL("Refact")); setupHost({ type: "cloud", apiKey: loginPollingResult.data.secret_key, userName: loginPollingResult.data.account, }); + + dispatch(push({ name: "history" })); + dispatch(newChatAction()); + dispatch(push({ name: "chat" })); } }, [dispatch, loginPollingResult.data, setupHost]); diff --git a/refact-agent/gui/src/hooks/usePreviewCheckpoints.ts b/refact-agent/gui/src/hooks/usePreviewCheckpoints.ts index 
2090f636f..6cafea398 100644 --- a/refact-agent/gui/src/hooks/usePreviewCheckpoints.ts +++ b/refact-agent/gui/src/hooks/usePreviewCheckpoints.ts @@ -7,8 +7,8 @@ export const usePreviewCheckpoints = () => { checkpointsApi.usePreviewCheckpointsMutation(); const previewChangesFromCheckpoints = useCallback( - (checkpoints: Checkpoint[]) => { - return mutationTrigger({ checkpoints }); + (checkpoints: Checkpoint[], chat_id: string, chat_mode?: string) => { + return mutationTrigger({ checkpoints, chat_id, chat_mode }); }, [mutationTrigger], ); diff --git a/refact-agent/gui/src/hooks/useProvidersQuery.ts b/refact-agent/gui/src/hooks/useProvidersQuery.ts index c1081843d..a898ff5cc 100644 --- a/refact-agent/gui/src/hooks/useProvidersQuery.ts +++ b/refact-agent/gui/src/hooks/useProvidersQuery.ts @@ -4,10 +4,6 @@ export function useGetConfiguredProvidersQuery() { return providersApi.useGetConfiguredProvidersQuery(undefined); } -export function useGetProviderTemplatesQuery() { - return providersApi.useGetProviderTemplatesQuery(undefined); -} - export function useGetProviderQuery({ providerName, }: { @@ -16,12 +12,34 @@ export function useGetProviderQuery({ return providersApi.useGetProviderQuery({ providerName }); } +export function useGetProviderSchemaQuery({ + providerName, +}: { + providerName: string; +}) { + return providersApi.useGetProviderSchemaQuery({ providerName }); +} + +export function useGetProviderModelsQuery({ + providerName, +}: { + providerName: string; +}) { + return providersApi.useGetProviderModelsQuery({ providerName }); +} + export function useUpdateProviderMutation() { - const [mutationTrigger] = providersApi.useUpdateProviderMutation(); - return mutationTrigger; + return providersApi.useUpdateProviderMutation(); } export function useDeleteProviderMutation() { - const [mutationTrigger] = providersApi.useDeleteProviderMutation(); - return mutationTrigger; + return providersApi.useDeleteProviderMutation(); +} + +export function useGetDefaultsQuery() { + return providersApi.useGetDefaultsQuery(undefined); +} + +export function useUpdateDefaultsMutation() { + return providersApi.useUpdateDefaultsMutation(); } diff --git a/refact-agent/gui/src/hooks/useRestoreCheckpoints.ts b/refact-agent/gui/src/hooks/useRestoreCheckpoints.ts index 435fea299..7961b2fc8 100644 --- a/refact-agent/gui/src/hooks/useRestoreCheckpoints.ts +++ b/refact-agent/gui/src/hooks/useRestoreCheckpoints.ts @@ -7,8 +7,8 @@ export const useRestoreCheckpoints = () => { checkpointsApi.useRestoreCheckpointsMutation(); const restoreChangesFromCheckpoints = useCallback( - (checkpoints: Checkpoint[]) => { - return mutationTrigger({ checkpoints }); + (checkpoints: Checkpoint[], chat_id: string, chat_mode?: string) => { + return mutationTrigger({ checkpoints, chat_id, chat_mode }); }, [mutationTrigger], ); diff --git a/refact-agent/gui/src/hooks/useSendChatCommand.ts b/refact-agent/gui/src/hooks/useSendChatCommand.ts new file mode 100644 index 000000000..03829e6ff --- /dev/null +++ b/refact-agent/gui/src/hooks/useSendChatCommand.ts @@ -0,0 +1,19 @@ +import { useCallback } from "react"; +import { useAppSelector } from "./useAppSelector"; +import { selectLspPort, selectApiKey } from "../features/Config/configSlice"; +import { + sendChatCommand, + type ChatCommandBase, +} from "../services/refact/chatCommands"; + +export function useSendChatCommand() { + const port = useAppSelector(selectLspPort); + const apiKey = useAppSelector(selectApiKey); + + return useCallback( + async (chatId: string, command: ChatCommandBase) => { + await 
sendChatCommand(chatId, port, apiKey ?? undefined, command); + }, + [port, apiKey], + ); +} diff --git a/refact-agent/gui/src/hooks/useSendChatRequest.ts b/refact-agent/gui/src/hooks/useSendChatRequest.ts deleted file mode 100644 index 3788321d8..000000000 --- a/refact-agent/gui/src/hooks/useSendChatRequest.ts +++ /dev/null @@ -1,477 +0,0 @@ -import { useCallback, useEffect, useMemo } from "react"; -import { useAppDispatch } from "./useAppDispatch"; -import { useAppSelector } from "./useAppSelector"; -import { - getSelectedSystemPrompt, - selectAutomaticPatch, - selectChatError, - selectChatId, - selectCheckpointsEnabled, - selectHasUncalledTools, - selectIntegration, - selectIsStreaming, - selectIsWaiting, - selectMessages, - selectPreventSend, - selectQueuedMessages, - selectSendImmediately, - selectThread, - selectThreadMode, - selectThreadToolUse, -} from "../features/Chat/Thread/selectors"; -import { useCheckForConfirmationMutation } from "./useGetToolGroupsQuery"; -import { - ChatMessage, - ChatMessages, - isAssistantMessage, - isUserMessage, - UserMessage, - UserMessageContentWithImage, -} from "../services/refact/types"; -import { - backUpMessages, - chatAskQuestionThunk, - chatAskedQuestion, - setSendImmediately, - enqueueUserMessage, - dequeueUserMessage, -} from "../features/Chat/Thread/actions"; - -import { selectAllImages } from "../features/AttachedImages"; -import { useAbortControllers } from "./useAbortControllers"; -import { - clearPauseReasonsAndHandleToolsStatus, - getToolsConfirmationStatus, - getToolsInteractionStatus, - resetConfirmationInteractedState, - setPauseReasons, -} from "../features/ToolConfirmation/confirmationSlice"; -import { - chatModeToLspMode, - doneStreaming, - fixBrokenToolMessages, - LspChatMode, - setChatMode, - setIsWaitingForResponse, - setLastUserMessageId, - setPreventSend, - upsertToolCall, -} from "../features/Chat"; - -import { v4 as uuidv4 } from "uuid"; -import { upsertToolCallIntoHistory } from "../features/History/historySlice"; - -type SendPolicy = "immediate" | "after_flow"; - -type SubmitHandlerParams = - | { - question: string; - maybeMode?: LspChatMode; - maybeMessages?: undefined; - maybeDropLastMessage?: boolean; - sendPolicy?: SendPolicy; - } - | { - question?: undefined; - maybeMode?: LspChatMode; - maybeMessages?: undefined; - maybeDropLastMessage?: boolean; - sendPolicy?: SendPolicy; - } - | { - question?: undefined; - maybeMode?: LspChatMode; - maybeMessages: ChatMessage[]; - maybeDropLastMessage?: boolean; - sendPolicy?: SendPolicy; - }; - -export const PATCH_LIKE_FUNCTIONS = [ - "patch", - "text_edit", - "create_textdoc", - "update_textdoc", - "replace_textdoc", - "update_textdoc_regex", - "update_textdoc_by_lines", -]; - -export const useSendChatRequest = () => { - const dispatch = useAppDispatch(); - const abortControllers = useAbortControllers(); - - // const [triggerGetTools] = useGetToolsLazyQuery(); - const [triggerCheckForConfirmation] = useCheckForConfirmationMutation(); - - const chatId = useAppSelector(selectChatId); - - const isWaiting = useAppSelector(selectIsWaiting); - const isStreaming = useAppSelector(selectIsStreaming); - const hasUnsentTools = useAppSelector(selectHasUncalledTools); - - const isBusy = isWaiting || isStreaming || hasUnsentTools; - - const currentMessages = useAppSelector(selectMessages); - const systemPrompt = useAppSelector(getSelectedSystemPrompt); - const toolUse = useAppSelector(selectThreadToolUse); - const attachedImages = useAppSelector(selectAllImages); - const threadMode = 
useAppSelector(selectThreadMode); - const threadIntegration = useAppSelector(selectIntegration); - const wasInteracted = useAppSelector(getToolsInteractionStatus); // shows if tool confirmation popup was interacted by user - const areToolsConfirmed = useAppSelector(getToolsConfirmationStatus); - - const isPatchAutomatic = useAppSelector(selectAutomaticPatch); - const checkpointsEnabled = useAppSelector(selectCheckpointsEnabled); - - const messagesWithSystemPrompt = useMemo(() => { - const prompts = Object.entries(systemPrompt); - if (prompts.length === 0) return currentMessages; - const [key, prompt] = prompts[0]; - if (key === "default") return currentMessages; - if (currentMessages.length === 0) { - const message: ChatMessage = { role: "system", content: prompt.text }; - return [message]; - } - return currentMessages; - }, [currentMessages, systemPrompt]); - - const sendMessages = useCallback( - async (messages: ChatMessages, maybeMode?: LspChatMode) => { - dispatch(setIsWaitingForResponse(true)); - const lastMessage = messages.slice(-1)[0]; - - if ( - !isWaiting && - !wasInteracted && - isAssistantMessage(lastMessage) && - lastMessage.tool_calls - ) { - const toolCalls = lastMessage.tool_calls; - if ( - !( - toolCalls[0].function.name && - PATCH_LIKE_FUNCTIONS.includes(toolCalls[0].function.name) && - isPatchAutomatic - ) - ) { - const confirmationResponse = await triggerCheckForConfirmation({ - tool_calls: toolCalls, - messages: messages, - }).unwrap(); - if (confirmationResponse.pause) { - dispatch(setPauseReasons(confirmationResponse.pause_reasons)); - return; - } - } - } - - dispatch(backUpMessages({ id: chatId, messages })); - dispatch(chatAskedQuestion({ id: chatId })); - - const mode = - maybeMode ?? chatModeToLspMode({ toolUse, mode: threadMode }); - - const maybeLastUserMessageIsFromUser = isUserMessage(lastMessage); - if (maybeLastUserMessageIsFromUser) { - dispatch(setLastUserMessageId({ chatId: chatId, messageId: uuidv4() })); - } - - const action = chatAskQuestionThunk({ - messages, - checkpointsEnabled, - chatId, - mode, - }); - - const dispatchedAction = dispatch(action); - abortControllers.addAbortController(chatId, dispatchedAction.abort); - }, - [ - toolUse, - isWaiting, - dispatch, - chatId, - threadMode, - wasInteracted, - checkpointsEnabled, - abortControllers, - triggerCheckForConfirmation, - isPatchAutomatic, - ], - ); - - const maybeAddImagesToQuestion = useCallback( - (question: string): UserMessage => { - if (attachedImages.length === 0) - return { role: "user" as const, content: question, checkpoints: [] }; - - const images = attachedImages.reduce<UserMessageContentWithImage[]>( - (acc, image) => { - if (typeof image.content !== "string") return acc; - return acc.concat({ - type: "image_url", - image_url: { url: image.content }, - }); - }, - [], - ); - - if (images.length === 0) - return { role: "user", content: question, checkpoints: [] }; - - return { - role: "user", - content: [...images, { type: "text", text: question }], - checkpoints: [], - }; - }, - [attachedImages], - ); - - const submit = useCallback( - ({ - question, - maybeMode, - maybeMessages, - maybeDropLastMessage, - sendPolicy = "after_flow", - }: SubmitHandlerParams) => { - let messages = messagesWithSystemPrompt; - if (maybeDropLastMessage) { - messages = messages.slice(0, -1); - } - - if (question) { - const message = maybeAddImagesToQuestion(question); - - // If busy, queue the message (priority = send at next available turn) - if (isBusy) { - dispatch( - enqueueUserMessage({ - id: 
uuidv4(), - message, - createdAt: Date.now(), - priority: sendPolicy === "immediate", - }), - ); - return; - } - - messages = messages.concat(message); - } else if (maybeMessages) { - messages = maybeMessages; - } - - // TODO: make a better way for setting / detecting thread mode. - const maybeConfigure = threadIntegration ? "CONFIGURE" : undefined; - const mode = chatModeToLspMode({ - toolUse, - mode: maybeMode ?? threadMode ?? maybeConfigure, - }); - dispatch(setChatMode(mode)); - - void sendMessages(messages, mode); - }, - [ - dispatch, - isBusy, - maybeAddImagesToQuestion, - messagesWithSystemPrompt, - sendMessages, - threadIntegration, - threadMode, - toolUse, - ], - ); - - const abort = useCallback(() => { - abortControllers.abort(chatId); - dispatch(setPreventSend({ id: chatId })); - dispatch(fixBrokenToolMessages({ id: chatId })); - dispatch(setIsWaitingForResponse(false)); - dispatch(doneStreaming({ id: chatId })); - }, [abortControllers, chatId, dispatch]); - - const retry = useCallback( - (messages: ChatMessages) => { - abort(); - dispatch( - clearPauseReasonsAndHandleToolsStatus({ - wasInteracted: false, - confirmationStatus: areToolsConfirmed, - }), - ); - void sendMessages(messages); - }, - [abort, sendMessages, dispatch, areToolsConfirmed], - ); - - const confirmToolUsage = useCallback(() => { - dispatch( - clearPauseReasonsAndHandleToolsStatus({ - wasInteracted: true, - confirmationStatus: true, - }), - ); - - dispatch(setIsWaitingForResponse(false)); - }, [dispatch]); - - const rejectToolUsage = useCallback( - (toolCallIds: string[]) => { - toolCallIds.forEach((toolCallId) => { - dispatch( - upsertToolCallIntoHistory({ toolCallId, chatId, accepted: false }), - ); - dispatch(upsertToolCall({ toolCallId, chatId, accepted: false })); - }); - - dispatch(resetConfirmationInteractedState()); - dispatch(setIsWaitingForResponse(false)); - dispatch(doneStreaming({ id: chatId })); - dispatch(setPreventSend({ id: chatId })); - }, - [chatId, dispatch], - ); - - const retryFromIndex = useCallback( - (index: number, question: UserMessage["content"]) => { - const messagesToKeep = currentMessages.slice(0, index); - const messagesToSend = messagesToKeep.concat([ - { role: "user", content: question, checkpoints: [] }, - ]); - retry(messagesToSend); - }, - [currentMessages, retry], - ); - - return { - submit, - abort, - retry, - retryFromIndex, - confirmToolUsage, - maybeAddImagesToQuestion, - rejectToolUsage, - sendMessages, - messagesWithSystemPrompt, - }; -}; - -// NOTE: only use this once -export function useAutoSend() { - const dispatch = useAppDispatch(); - const streaming = useAppSelector(selectIsStreaming); - const currentMessages = useAppSelector(selectMessages); - const errored = useAppSelector(selectChatError); - const preventSend = useAppSelector(selectPreventSend); - const isWaiting = useAppSelector(selectIsWaiting); - const sendImmediately = useAppSelector(selectSendImmediately); - const wasInteracted = useAppSelector(getToolsInteractionStatus); // shows if tool confirmation popup was interacted by user - const areToolsConfirmed = useAppSelector(getToolsConfirmationStatus); - const hasUnsentTools = useAppSelector(selectHasUncalledTools); - const queuedMessages = useAppSelector(selectQueuedMessages); - const { sendMessages, messagesWithSystemPrompt } = useSendChatRequest(); - // TODO: make a selector for this, or show tool formation - const thread = useAppSelector(selectThread); - const isIntegration = thread.integration ?? 
false; - - useEffect(() => { - if (sendImmediately) { - dispatch(setSendImmediately(false)); - void sendMessages(messagesWithSystemPrompt); - } - }, [dispatch, messagesWithSystemPrompt, sendImmediately, sendMessages]); - - const stop = useMemo(() => { - if (errored) return true; - if (preventSend) return true; - if (isWaiting) return true; - if (streaming) return true; - return !hasUnsentTools; - }, [errored, hasUnsentTools, isWaiting, preventSend, streaming]); - - const stopForToolConfirmation = useMemo(() => { - if (isIntegration) return false; - return !wasInteracted && !areToolsConfirmed; - }, [isIntegration, wasInteracted, areToolsConfirmed]); - - // Base conditions for flushing queue (streaming must be done) - const canFlushBase = useMemo(() => { - if (errored) return false; - if (preventSend) return false; - if (streaming) return false; - if (isWaiting) return false; - return true; - }, [errored, preventSend, streaming, isWaiting]); - - // Full idle: also wait for tools to complete (for regular queued messages) - const isFullyIdle = useMemo(() => { - if (!canFlushBase) return false; - if (hasUnsentTools) return false; - if (stopForToolConfirmation) return false; - return true; - }, [canFlushBase, hasUnsentTools, stopForToolConfirmation]); - - // Process queued messages - // Priority messages: flush as soon as streaming ends (next turn) - // Regular messages: wait for full idle (tools complete) - useEffect(() => { - if (queuedMessages.length === 0) return; - - const nextQueued = queuedMessages[0]; - const isPriority = nextQueued.priority; - - // Priority: flush when base conditions met (right after streaming) - // Regular: flush only when fully idle (after tools complete) - const canFlush = isPriority ? canFlushBase : isFullyIdle; - - if (!canFlush) return; - - // Remove from queue first to prevent double-send - dispatch(dequeueUserMessage({ queuedId: nextQueued.id })); - - // Send the queued message - void sendMessages([...currentMessages, nextQueued.message], thread.mode); - }, [ - canFlushBase, - isFullyIdle, - queuedMessages, - dispatch, - sendMessages, - currentMessages, - thread.mode, - ]); - - // Check if there are priority messages waiting - const hasPriorityMessages = useMemo( - () => queuedMessages.some((m) => m.priority), - [queuedMessages], - ); - - useEffect(() => { - if (stop) return; - if (stopForToolConfirmation) return; - // Don't run tool follow-up if there are priority messages waiting - // Let the queue flush handle them first - if (hasPriorityMessages) return; - - dispatch( - clearPauseReasonsAndHandleToolsStatus({ - wasInteracted: false, - confirmationStatus: areToolsConfirmed, - }), - ); - - void sendMessages(currentMessages, thread.mode); - }, [ - areToolsConfirmed, - currentMessages, - dispatch, - hasPriorityMessages, - sendMessages, - stop, - stopForToolConfirmation, - thread.mode, - ]); -} diff --git a/refact-agent/gui/src/hooks/useShiki.ts b/refact-agent/gui/src/hooks/useShiki.ts new file mode 100644 index 000000000..93b4c5e0a --- /dev/null +++ b/refact-agent/gui/src/hooks/useShiki.ts @@ -0,0 +1,238 @@ +import { useEffect, useState, useCallback } from "react"; +import { + createHighlighter, + type Highlighter, + type BundledLanguage, + type BundledTheme, +} from "shiki"; + +let highlighterInstance: Highlighter | null = null; +let highlighterPromise: Promise<Highlighter> | null = null; + +// Serialization queue: ensures only one codeToHtml() WASM call runs at a time. 
+// Prevents burst of concurrent WASM tokenizer invocations when switching chats +// or mounting many code blocks simultaneously (can cause SIGSEGV/SIGILL). +let highlightQueue = Promise.resolve(); + +function yieldToMain(): Promise<void> { + return new Promise((resolve) => setTimeout(resolve, 0)); +} + +function enqueueHighlight<T>(fn: () => T): Promise<T> { + const task = highlightQueue.then( + () => yieldToMain().then(fn), + () => yieldToMain().then(fn), + ); + // eslint-disable-next-line @typescript-eslint/no-empty-function + const noop = () => {}; + highlightQueue = task.then(noop, noop); + return task; +} + +const INITIAL_LANGUAGES: BundledLanguage[] = [ + "javascript", + "typescript", + "python", + "rust", + "go", + "java", + "c", + "cpp", + "csharp", + "html", + "css", + "json", + "yaml", + "markdown", + "bash", + "shell", + "sql", + "dockerfile", + "tsx", + "jsx", +]; + +const LIGHT_THEME: BundledTheme = "github-light"; +const DARK_THEME: BundledTheme = "github-dark"; + +async function getHighlighter(): Promise<Highlighter> { + if (highlighterInstance) { + return highlighterInstance; + } + + if (highlighterPromise) { + return highlighterPromise; + } + + highlighterPromise = createHighlighter({ + themes: [LIGHT_THEME, DARK_THEME], + langs: INITIAL_LANGUAGES, + }) + .then((h: Highlighter) => { + highlighterInstance = h; + return h; + }) + .catch((err: unknown) => { + highlighterPromise = null; + throw err; + }); + + return highlighterPromise; +} + +const LANGUAGE_ALIASES: Record<string, string> = { + js: "javascript", + ts: "typescript", + py: "python", + rb: "ruby", + sh: "bash", + zsh: "bash", + yml: "yaml", + md: "markdown", + rs: "rust", + cs: "csharp", + "c++": "cpp", + "c#": "csharp", + plaintext: "plaintext", + plain: "plaintext", + text: "plaintext", +}; + +function normalizeLanguage(lang: string): string { + const lower = lang.toLowerCase(); + const alias = LANGUAGE_ALIASES[lower] as string | undefined; + return alias ?? lower; +} + +export type ShikiHighlightResult = { + html: string; + language: string; +}; + +export function useShiki() { + const [highlighter, setHighlighter] = useState<Highlighter | null>( + highlighterInstance, + ); + const [isLoading, setIsLoading] = useState(!highlighterInstance); + const [error, setError] = useState<Error | null>(null); + + useEffect(() => { + if (highlighterInstance) { + setHighlighter(highlighterInstance); + setIsLoading(false); + return; + } + + let mounted = true; + + void getHighlighter() + .then((h: Highlighter) => { + if (mounted) { + setHighlighter(h); + setIsLoading(false); + } + }) + .catch((err: unknown) => { + if (mounted) { + setError(err instanceof Error ? err : new Error(String(err))); + setIsLoading(false); + } + }); + + return () => { + mounted = false; + }; + }, []); + + const highlight = useCallback( + async ( + code: string, + language: string, + isDark: boolean, + ): Promise<ShikiHighlightResult> => { + const h = highlighter ?? (await getHighlighter()); + const normalizedLang = normalizeLanguage(language); + const theme = isDark ? 
DARK_THEME : LIGHT_THEME; + + const loadedLangs = ( + h as unknown as { getLoadedLanguages(): BundledLanguage[] } + ).getLoadedLanguages(); + let finalLang = normalizedLang; + + if (!loadedLangs.includes(normalizedLang as BundledLanguage)) { + try { + await ( + h as unknown as { + loadLanguage(lang: BundledLanguage): Promise<void>; + } + ).loadLanguage(normalizedLang as BundledLanguage); + } catch { + finalLang = "plaintext"; + } + } + + const html = await enqueueHighlight(() => + ( + h as unknown as { + codeToHtml( + code: string, + options: { lang: string; theme: BundledTheme }, + ): string; + } + ).codeToHtml(code, { + lang: finalLang, + theme, + }), + ); + + return { html, language: finalLang }; + }, + [highlighter], + ); + + const highlightSync = useCallback( + ( + code: string, + language: string, + isDark: boolean, + ): ShikiHighlightResult | null => { + if (!highlighter) return null; + + const normalizedLang = normalizeLanguage(language); + const theme = isDark ? DARK_THEME : LIGHT_THEME; + const loadedLangs = ( + highlighter as unknown as { getLoadedLanguages(): BundledLanguage[] } + ).getLoadedLanguages(); + + const finalLang = loadedLangs.includes(normalizedLang as BundledLanguage) + ? normalizedLang + : "plaintext"; + + const html = ( + highlighter as unknown as { + codeToHtml( + code: string, + options: { lang: string; theme: BundledTheme }, + ): string; + } + ).codeToHtml(code, { + lang: finalLang, + theme, + }); + + return { html, language: finalLang }; + }, + [highlighter], + ); + + return { + highlighter, + isLoading, + error, + highlight, + highlightSync, + isReady: !!highlighter && !isLoading, + }; +} + +export { LIGHT_THEME, DARK_THEME }; diff --git a/refact-agent/gui/src/hooks/useSidebarSubscription.ts b/refact-agent/gui/src/hooks/useSidebarSubscription.ts new file mode 100644 index 000000000..850e5cdf1 --- /dev/null +++ b/refact-agent/gui/src/hooks/useSidebarSubscription.ts @@ -0,0 +1,511 @@ +import { useEffect, useRef, useCallback } from "react"; +import { useAppDispatch } from "./useAppDispatch"; +import { useConfig } from "./useConfig"; +import { + subscribeToSidebarEvents, + SidebarEventEnvelope, +} from "../services/refact/sidebarSubscription"; +import type { TrajectoryMeta } from "../services/refact/trajectories"; +import { + hydrateHistoryFromMeta, + deleteChatById, + updateChatMetaById, + setHistoryLoading, + setHistoryLoadError, + setPagination, +} from "../features/History/historySlice"; +import type { ChatHistoryItem } from "../features/History/historySlice"; +import { + updateOpenThread, + closeThread, + updateChatRuntimeFromSessionState, +} from "../features/Chat/Thread"; +import { tasksApi } from "../services/refact/tasks"; +import { + trajectoriesApi, + chatThreadToTrajectoryData, +} from "../services/refact/trajectories"; +import { useAppSelector } from "./useAppSelector"; + +const RECONNECT_DELAY_MS = 500; +const MIGRATION_KEY = "refact-trajectories-migrated"; + +function getLegacyHistory(): ChatHistoryItem[] { + try { + const raw = localStorage.getItem("persist:root"); + if (!raw) return []; + + const parsed = JSON.parse(raw) as Record<string, string>; + if (!parsed.history) return []; + + const historyData = JSON.parse(parsed.history) as unknown; + if (typeof historyData !== "object" || historyData === null) return []; + + const historyObj = historyData as Record<string, unknown>; + const chats = + "chats" in historyObj && typeof historyObj.chats === "object" + ? 
(historyObj.chats as Record<string, ChatHistoryItem>) + : (historyObj as Record<string, ChatHistoryItem>); + + const values = Object.values(chats) as unknown[]; + return values.filter((item): item is ChatHistoryItem => { + if (typeof item !== "object" || item === null) return false; + const obj = item as Record<string, unknown>; + return "id" in obj && "messages" in obj && Array.isArray(obj.messages); + }); + } catch { + return []; + } +} + +function clearLegacyHistory() { + try { + const raw = localStorage.getItem("persist:root"); + if (!raw) return; + + const parsed = JSON.parse(raw) as Record<string, string>; + parsed.history = "{}"; + localStorage.setItem("persist:root", JSON.stringify(parsed)); + } catch { + // Ignore localStorage errors + } +} + +function isMigrationDone(): boolean { + return localStorage.getItem(MIGRATION_KEY) === "true"; +} + +function markMigrationDone() { + localStorage.setItem(MIGRATION_KEY, "true"); +} + +export function useSidebarSubscription() { + const dispatch = useAppDispatch(); + const config = useConfig(); + const historyChats = useAppSelector((state) => state.history.chats); + const historyRef = useRef(historyChats); + historyRef.current = historyChats; + const disconnectRef = useRef<(() => void) | null>(null); + const reconnectTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>( + null, + ); + const initialLoadDoneRef = useRef(false); + // eslint-disable-next-line @typescript-eslint/no-empty-function + const connectRef = useRef<() => void>(() => {}); + + const processTrajectoryEvent = useCallback( + (event: SidebarEventEnvelope & { category: "trajectory" }) => { + if (event.type === "deleted") { + dispatch(deleteChatById(event.id)); + dispatch(closeThread({ id: event.id, force: true })); + return; + } + + const existsInHistory = event.id in historyRef.current; + const hasMetaUpdate = + event.title !== undefined || + event.updated_at !== undefined || + event.session_state !== undefined || + event.message_count !== undefined || + event.parent_id !== undefined || + event.link_type !== undefined || + event.root_chat_id !== undefined || + event.is_title_generated !== undefined || + event.error !== undefined || + event.model !== undefined || + event.mode !== undefined || + event.total_coins !== undefined || + event.total_lines_added !== undefined || + event.total_lines_removed !== undefined || + event.tasks_total !== undefined || + event.tasks_done !== undefined || + event.tasks_failed !== undefined; + + if (existsInHistory && hasMetaUpdate) { + const metaPatch: Record<string, unknown> = { id: event.id }; + if (event.title !== undefined) metaPatch.title = event.title; + if (event.is_title_generated !== undefined) + metaPatch.isTitleGenerated = event.is_title_generated; + if (event.updated_at !== undefined) + metaPatch.updatedAt = event.updated_at; + if (event.session_state !== undefined) + metaPatch.session_state = event.session_state; + if (event.message_count !== undefined) + metaPatch.message_count = event.message_count; + if (event.parent_id !== undefined) + metaPatch.parent_id = event.parent_id; + if (event.link_type !== undefined) + metaPatch.link_type = event.link_type; + if (event.root_chat_id !== undefined) + metaPatch.root_chat_id = event.root_chat_id; + if (event.total_coins !== undefined) + metaPatch.total_coins = event.total_coins; + if (event.total_lines_added !== undefined) + metaPatch.total_lines_added = event.total_lines_added; + if (event.total_lines_removed !== undefined) + metaPatch.total_lines_removed = event.total_lines_removed; + 
if (event.model !== undefined) metaPatch.model = event.model; + if (event.mode !== undefined) metaPatch.mode = event.mode; + if (event.tasks_total !== undefined) + metaPatch.tasks_total = event.tasks_total; + if (event.tasks_done !== undefined) + metaPatch.tasks_done = event.tasks_done; + if (event.tasks_failed !== undefined) + metaPatch.tasks_failed = event.tasks_failed; + dispatch( + updateChatMetaById( + metaPatch as Parameters<typeof updateChatMetaById>[0], + ), + ); + + if ( + event.title !== undefined || + event.is_title_generated !== undefined + ) { + const threadPatch: Record<string, unknown> = {}; + if (event.title !== undefined) threadPatch.title = event.title; + if (event.is_title_generated !== undefined) + threadPatch.isTitleGenerated = event.is_title_generated; + if (Object.keys(threadPatch).length > 0) { + dispatch( + updateOpenThread({ + id: event.id, + thread: threadPatch as Parameters< + typeof updateOpenThread + >[0]["thread"], + }), + ); + } + } + if (event.session_state !== undefined) { + dispatch( + updateChatRuntimeFromSessionState({ + id: event.id, + session_state: event.session_state, + error: event.error, + }), + ); + } + } else if ( + !existsInHistory && + (event.title !== undefined || event.session_state !== undefined) && + event.updated_at + ) { + dispatch( + hydrateHistoryFromMeta([ + { + id: event.id, + title: event.title ?? "New Chat", + created_at: event.updated_at, + updated_at: event.updated_at, + model: event.model ?? "", + mode: event.mode ?? "AGENT", + message_count: event.message_count ?? 0, + session_state: event.session_state, + parent_id: event.parent_id, + link_type: event.link_type, + root_chat_id: event.root_chat_id, + total_coins: event.total_coins, + total_lines_added: event.total_lines_added ?? 0, + total_lines_removed: event.total_lines_removed ?? 0, + tasks_total: 0, + tasks_done: 0, + tasks_failed: 0, + }, + ]), + ); + const threadPatch: Record<string, unknown> = {}; + if (event.title !== undefined) threadPatch.title = event.title; + if (event.is_title_generated !== undefined) + threadPatch.isTitleGenerated = event.is_title_generated; + if (Object.keys(threadPatch).length > 0) { + dispatch( + updateOpenThread({ + id: event.id, + thread: threadPatch as Parameters< + typeof updateOpenThread + >[0]["thread"], + }), + ); + } + if (event.session_state !== undefined) { + dispatch( + updateChatRuntimeFromSessionState({ + id: event.id, + session_state: event.session_state, + error: event.error, + }), + ); + } + } + }, + [dispatch], + ); + + const processTaskEvent = useCallback( + (event: SidebarEventEnvelope & { category: "task" }) => { + switch (event.type) { + case "snapshot": + dispatch( + tasksApi.util.updateQueryData( + "listTasks", + undefined, + () => event.tasks, + ), + ); + break; + + case "task_created": + dispatch( + tasksApi.util.updateQueryData("listTasks", undefined, (draft) => { + const exists = draft.some((t) => t.id === event.task_id); + if (!exists) { + draft.unshift(event.meta); + } + }), + ); + break; + + case "task_updated": + dispatch( + tasksApi.util.updateQueryData("listTasks", undefined, (draft) => { + const index = draft.findIndex((t) => t.id === event.task_id); + if (index >= 0) { + const existing = draft[index]; + draft[index] = { + ...event.meta, + planner_session_state: + event.meta.planner_session_state ?? 
+ existing.planner_session_state, + }; + } + draft.sort((a, b) => b.updated_at.localeCompare(a.updated_at)); + }), + ); + dispatch( + tasksApi.util.updateQueryData( + "getTask", + event.task_id, + (existing) => ({ + ...event.meta, + planner_session_state: + event.meta.planner_session_state ?? + existing.planner_session_state, + }), + ), + ); + break; + + case "task_deleted": + dispatch( + tasksApi.util.updateQueryData("listTasks", undefined, (draft) => { + const index = draft.findIndex((t) => t.id === event.task_id); + if (index >= 0) { + draft.splice(index, 1); + } + }), + ); + break; + + case "board_changed": + dispatch( + tasksApi.util.updateQueryData( + "getBoard", + event.task_id, + () => event.board, + ), + ); + break; + } + }, + [dispatch], + ); + + const processSnapshot = useCallback( + (event: SidebarEventEnvelope & { category: "snapshot" }) => { + const trajectoryItems = event.trajectories.map((t: TrajectoryMeta) => ({ + id: t.id, + title: t.title, + created_at: t.created_at, + updated_at: t.updated_at, + model: t.model, + mode: t.mode, + message_count: t.message_count, + session_state: t.session_state, + parent_id: t.parent_id, + link_type: t.link_type, + root_chat_id: t.root_chat_id, + total_coins: t.total_coins, + total_lines_added: t.total_lines_added, + total_lines_removed: t.total_lines_removed, + tasks_total: t.tasks_total, + tasks_done: t.tasks_done, + tasks_failed: t.tasks_failed, + })); + + dispatch(hydrateHistoryFromMeta(trajectoryItems)); + dispatch(setHistoryLoadError(null)); + dispatch(setHistoryLoading(false)); + + dispatch( + tasksApi.util.updateQueryData( + "listTasks", + undefined, + () => event.tasks, + ), + ); + }, + [dispatch], + ); + + const migrateFromLocalStorage = useCallback(async () => { + if (isMigrationDone()) return; + + const legacyChats = getLegacyHistory(); + if (legacyChats.length === 0) { + markMigrationDone(); + return; + } + + let successCount = 0; + for (const chat of legacyChats) { + if (chat.messages.length === 0) continue; + + try { + const trajectoryData = chatThreadToTrajectoryData( + { + ...chat, + new_chat_suggested: chat.new_chat_suggested ?? { + wasSuggested: false, + }, + }, + chat.createdAt, + ); + trajectoryData.updated_at = chat.updatedAt; + + await dispatch( + trajectoriesApi.endpoints.saveTrajectory.initiate(trajectoryData), + ).unwrap(); + successCount++; + } catch { + // Ignore individual chat migration failures + } + } + + if (successCount > 0) { + clearLegacyHistory(); + } + markMigrationDone(); + }, [dispatch]); + + const loadInitialHistory = useCallback(async () => { + dispatch(setHistoryLoading(true)); + try { + await migrateFromLocalStorage(); + + const result = await dispatch( + trajectoriesApi.endpoints.listTrajectoriesPaginated.initiate( + { limit: 50 }, + { forceRefetch: true }, + ), + ).unwrap(); + + dispatch(hydrateHistoryFromMeta(result.items)); + dispatch( + setPagination({ + cursor: result.next_cursor, + hasMore: result.has_more, + }), + ); + dispatch(setHistoryLoading(false)); + initialLoadDoneRef.current = true; + } catch (err) { + const message = + err instanceof Error ? 
err.message : "Failed to load history"; + dispatch(setHistoryLoadError(message)); + } + }, [dispatch, migrateFromLocalStorage]); + + const scheduleReconnect = useCallback(() => { + if (reconnectTimeoutRef.current) return; + reconnectTimeoutRef.current = setTimeout(() => { + reconnectTimeoutRef.current = null; + connectRef.current(); + }, RECONNECT_DELAY_MS); + }, []); + + const connect = useCallback(() => { + if (disconnectRef.current) { + disconnectRef.current(); + disconnectRef.current = null; + } + if (reconnectTimeoutRef.current) { + clearTimeout(reconnectTimeoutRef.current); + reconnectTimeoutRef.current = null; + } + + const port = config.lspPort; + const apiKey = config.apiKey ?? null; + + if (port <= 0 || port > 65535) { + scheduleReconnect(); + return; + } + + const onEvent = (envelope: SidebarEventEnvelope) => { + if (envelope.category === "snapshot") { + processSnapshot( + envelope as SidebarEventEnvelope & { category: "snapshot" }, + ); + } else if (envelope.category === "trajectory") { + processTrajectoryEvent( + envelope as SidebarEventEnvelope & { category: "trajectory" }, + ); + } else { + processTaskEvent( + envelope as SidebarEventEnvelope & { category: "task" }, + ); + } + }; + + const onError = (error: Error) => { + if (!initialLoadDoneRef.current) { + dispatch(setHistoryLoadError(error.message)); + } + scheduleReconnect(); + }; + + const onDisconnected = () => { + scheduleReconnect(); + }; + + disconnectRef.current = subscribeToSidebarEvents(port, apiKey, { + onEvent, + onError, + onDisconnected, + }); + }, [ + dispatch, + config.lspPort, + config.apiKey, + processSnapshot, + processTrajectoryEvent, + processTaskEvent, + scheduleReconnect, + ]); + + connectRef.current = connect; + + useEffect(() => { + void loadInitialHistory(); + connect(); + return () => { + if (disconnectRef.current) { + disconnectRef.current(); + } + if (reconnectTimeoutRef.current) { + clearTimeout(reconnectTimeoutRef.current); + } + }; + }, [connect, loadInitialHistory]); +} diff --git a/refact-agent/gui/src/hooks/useStreamingVoiceRecording.ts b/refact-agent/gui/src/hooks/useStreamingVoiceRecording.ts new file mode 100644 index 000000000..318961432 --- /dev/null +++ b/refact-agent/gui/src/hooks/useStreamingVoiceRecording.ts @@ -0,0 +1,321 @@ +import { useState, useRef, useCallback, useEffect } from "react"; +import { v4 as uuidv4 } from "uuid"; +import { + subscribeToVoiceStream, + sendVoiceChunk, + VoiceStreamEvent, +} from "../services/refact/voice"; + +export interface UseStreamingVoiceRecordingResult { + isRecording: boolean; + isFinishing: boolean; + transcript: string; + error: string | null; + startRecording: () => Promise<void>; + stopRecording: () => Promise<string>; + cancelRecording: () => void; +} + +function floatTo16BitPCM(samples: Float32Array): ArrayBuffer { + const buffer = new ArrayBuffer(samples.length * 2); + const view = new DataView(buffer); + for (let i = 0; i < samples.length; i++) { + const s = Math.max(-1, Math.min(1, samples[i])); + view.setInt16(i * 2, s < 0 ? 
s * 0x8000 : s * 0x7fff, true); + } + return buffer; +} + +function arrayBufferToBase64(buffer: ArrayBuffer): string { + const bytes = new Uint8Array(buffer); + let binary = ""; + for (let i = 0; i < bytes.byteLength; i++) { + binary += String.fromCharCode(bytes[i]); + } + return btoa(binary); +} + +export function useStreamingVoiceRecording( + port: number, +): UseStreamingVoiceRecordingResult { + const [isRecording, setIsRecording] = useState(false); + const [isFinishing, setIsFinishing] = useState(false); + const [transcript, setTranscript] = useState(""); + const [error, setError] = useState<string | null>(null); + + const sessionIdRef = useRef<string>(""); + const streamRef = useRef<MediaStream | null>(null); + const audioContextRef = useRef<AudioContext | null>(null); + const processorRef = useRef<ScriptProcessorNode | null>(null); + const unsubscribeRef = useRef<(() => void) | null>(null); + const bufferRef = useRef<Float32Array[]>([]); + const sendIntervalRef = useRef<ReturnType<typeof setInterval> | null>(null); + const finalizeResolveRef = useRef<((text: string) => void) | null>(null); + const finalizeRejectRef = useRef<((err: Error) => void) | null>(null); + const recordingStartTimeRef = useRef<number>(0); + + const LIVE_WINDOW_SECONDS = 20; + + const cleanupStream = useCallback(() => { + if (streamRef.current) { + streamRef.current.getTracks().forEach((track) => track.stop()); + streamRef.current = null; + } + }, []); + + const handleEvent = useCallback( + (event: VoiceStreamEvent) => { + if (event.type === "transcript") { + const elapsedSeconds = + (Date.now() - recordingStartTimeRef.current) / 1000; + const wasCut = !event.is_final && elapsedSeconds > LIVE_WINDOW_SECONDS; + setTranscript(wasCut ? "... " + event.text : event.text); + if (event.is_final) { + setIsFinishing(false); + finalizeResolveRef.current?.(event.text); + finalizeResolveRef.current = null; + finalizeRejectRef.current = null; + unsubscribeRef.current?.(); + unsubscribeRef.current = null; + cleanupStream(); + } + } else if (event.type === "error") { + setError(event.message); + setIsFinishing(false); + finalizeRejectRef.current?.(new Error(event.message)); + finalizeResolveRef.current = null; + finalizeRejectRef.current = null; + unsubscribeRef.current?.(); + unsubscribeRef.current = null; + cleanupStream(); + } else { + // event.type === "ended" + setIsFinishing(false); + finalizeRejectRef.current?.( + new Error("Stream ended without final transcript"), + ); + finalizeResolveRef.current = null; + finalizeRejectRef.current = null; + unsubscribeRef.current?.(); + unsubscribeRef.current = null; + cleanupStream(); + } + }, + [cleanupStream], + ); + + const sendBufferedAudio = useCallback( + async (final: boolean) => { + const hasAudio = bufferRef.current.length > 0; + + if (!hasAudio && !final) return; + + let base64 = ""; + + if (hasAudio) { + const totalLength = bufferRef.current.reduce( + (acc, arr) => acc + arr.length, + 0, + ); + const combined = new Float32Array(totalLength); + let offset = 0; + for (const arr of bufferRef.current) { + combined.set(arr, offset); + offset += arr.length; + } + + if (!final) bufferRef.current = []; + + const pcmBuffer = floatTo16BitPCM(combined); + base64 = arrayBufferToBase64(pcmBuffer); + } + + await sendVoiceChunk(port, sessionIdRef.current, base64, final); + }, + [port], + ); + + const startRecording = useCallback(async () => { + setError(null); + setTranscript(""); + setIsFinishing(false); + finalizeResolveRef.current = null; + finalizeRejectRef.current = null; + 
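// Discard any audio chunks buffered by an earlier recording session. +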
bufferRef.current = []; + + // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition + if (!navigator.mediaDevices?.getUserMedia) { + setError("Microphone not supported in this browser or context"); + return; + } + + sessionIdRef.current = uuidv4(); + recordingStartTimeRef.current = Date.now(); + + unsubscribeRef.current = subscribeToVoiceStream( + port, + sessionIdRef.current, + undefined, + handleEvent, + (err) => { + setError(err.message); + setIsFinishing(false); + setIsRecording(false); + finalizeRejectRef.current?.(err); + finalizeResolveRef.current = null; + finalizeRejectRef.current = null; + cleanupStream(); + }, + ); + + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + streamRef.current = stream; + + const audioContext = new AudioContext({ sampleRate: 16000 }); + audioContextRef.current = audioContext; + + const source = audioContext.createMediaStreamSource(stream); + const processor = audioContext.createScriptProcessor(4096, 1, 1); + processorRef.current = processor; + + processor.onaudioprocess = (e) => { + const inputData = e.inputBuffer.getChannelData(0); + bufferRef.current.push(new Float32Array(inputData)); + }; + + source.connect(processor); + processor.connect(audioContext.destination); + + sendIntervalRef.current = setInterval(() => { + void sendBufferedAudio(false); + }, 1000); + + setIsRecording(true); + }, [port, handleEvent, sendBufferedAudio, cleanupStream]); + + const stopRecording = useCallback(async (): Promise<string> => { + if (!isRecording) throw new Error("Not recording"); + + setIsRecording(false); + setIsFinishing(true); + + const finalPromise = new Promise<string>((resolve, reject) => { + finalizeResolveRef.current = resolve; + finalizeRejectRef.current = reject; + }); + + if (sendIntervalRef.current) { + clearInterval(sendIntervalRef.current); + sendIntervalRef.current = null; + } + + if (processorRef.current) { + processorRef.current.disconnect(); + processorRef.current = null; + } + + if (audioContextRef.current) { + await audioContextRef.current.close(); + audioContextRef.current = null; + } + + if (streamRef.current) { + streamRef.current.getTracks().forEach((track) => track.stop()); + streamRef.current = null; + } + + await sendBufferedAudio(true); + bufferRef.current = []; + + return finalPromise; + }, [isRecording, sendBufferedAudio]); + + const cancelRecording = useCallback(() => { + if (!isRecording && !isFinishing) return; + + setIsRecording(false); + setIsFinishing(false); + setTranscript(""); + + finalizeResolveRef.current = null; + finalizeRejectRef.current = null; + + if (sendIntervalRef.current) { + clearInterval(sendIntervalRef.current); + sendIntervalRef.current = null; + } + + if (processorRef.current) { + processorRef.current.disconnect(); + processorRef.current = null; + } + + if (audioContextRef.current) { + void audioContextRef.current.close(); + audioContextRef.current = null; + } + + unsubscribeRef.current?.(); + unsubscribeRef.current = null; + + cleanupStream(); + bufferRef.current = []; + }, [isRecording, isFinishing, cleanupStream]); + + useEffect(() => { + return () => { + unsubscribeRef.current?.(); + if (sendIntervalRef.current) clearInterval(sendIntervalRef.current); + if (processorRef.current) { + processorRef.current.disconnect(); + processorRef.current = null; + } + if (audioContextRef.current) { + void audioContextRef.current.close(); + audioContextRef.current = null; + } + if (streamRef.current) { + streamRef.current.getTracks().forEach((track) => track.stop()); + 
streamRef.current = null; + } + }; + }, []); + + useEffect(() => { + if (!isRecording) return; + + const handleKeyDown = (event: KeyboardEvent) => { + if (event.repeat) return; + + const el = event.target as HTMLElement; + if ( + el.tagName === "INPUT" || + el.tagName === "TEXTAREA" || + el.isContentEditable + ) { + return; + } + + if (event.key === "Enter") { + event.preventDefault(); + void stopRecording(); + } else if (event.key === "Escape") { + event.preventDefault(); + cancelRecording(); + } + }; + + window.addEventListener("keydown", handleKeyDown); + return () => window.removeEventListener("keydown", handleKeyDown); + }, [isRecording, stopRecording, cancelRecording]); + + return { + isRecording, + isFinishing, + transcript, + error, + startRecording, + stopRecording, + cancelRecording, + }; +} diff --git a/refact-agent/gui/src/hooks/useThinking.ts b/refact-agent/gui/src/hooks/useThinking.ts index 6fdd05892..aa5055a97 100644 --- a/refact-agent/gui/src/hooks/useThinking.ts +++ b/refact-agent/gui/src/hooks/useThinking.ts @@ -1,4 +1,4 @@ -import { useCallback, useEffect, useMemo } from "react"; +import { useCallback, useMemo } from "react"; import { useCapsForToolUse } from "./useCapsForToolUse"; import { useAppSelector } from "./useAppSelector"; import { @@ -6,6 +6,7 @@ import { selectIsStreaming, selectIsWaiting, selectThreadBoostReasoning, + selectModel, setBoostReasoning, } from "../features/Chat"; import { useAppDispatch } from "./useAppDispatch"; @@ -17,17 +18,25 @@ export function useThinking() { const isStreaming = useAppSelector(selectIsStreaming); const isWaiting = useAppSelector(selectIsWaiting); const chatId = useAppSelector(selectChatId); + const threadModel = useAppSelector(selectModel); const isBoostReasoningEnabled = useAppSelector(selectThreadBoostReasoning); const caps = useCapsForToolUse(); const { data: userData } = useGetUser(); + const currentModel = threadModel || caps.currentModel; + const supportsBoostReasoning = useMemo(() => { const models = caps.data?.chat_models; - const item = models?.[caps.currentModel]; - return item?.supports_boost_reasoning ?? false; - }, [caps.data?.chat_models, caps.currentModel]); + const item = models?.[currentModel]; + if (!item) return false; + return ( + !!item.reasoning_effort_options?.length || + !!item.supports_thinking_budget || + !!item.supports_adaptive_thinking_budget + ); + }, [caps.data?.chat_models, currentModel]); const shouldBeTeasing = useMemo( () => userData?.inference === "FREE", @@ -42,7 +51,7 @@ export function useThinking() { const noteText = useMemo(() => { if (!supportsBoostReasoning) - return `Note: ${caps.currentModel} doesn't support thinking`; + return `Note: ${currentModel} doesn't support thinking`; if (isStreaming || isWaiting) return `Note: you can't ${ isBoostReasoningEnabled ? 
"disable" : "enable" @@ -52,7 +61,7 @@ export function useThinking() { isStreaming, isWaiting, isBoostReasoningEnabled, - caps.currentModel, + currentModel, ]); const handleReasoningChange = useCallback( @@ -64,17 +73,12 @@ export function useThinking() { [dispatch, chatId], ); - useEffect(() => { - if (!supportsBoostReasoning) { - dispatch(setBoostReasoning({ chatId, value: supportsBoostReasoning })); - } - }, [dispatch, chatId, supportsBoostReasoning, shouldBeDisabled]); - return { handleReasoningChange, shouldBeDisabled, shouldBeTeasing, noteText, areCapsInitialized: !caps.uninitialized, + supportsBoostReasoning, }; } diff --git a/refact-agent/gui/src/hooks/useTotalCostForChat.ts b/refact-agent/gui/src/hooks/useTotalCostForChat.ts index 80c774fb8..530e5ba38 100644 --- a/refact-agent/gui/src/hooks/useTotalCostForChat.ts +++ b/refact-agent/gui/src/hooks/useTotalCostForChat.ts @@ -2,6 +2,7 @@ import { selectMessages } from "../features/Chat"; import { getTotalCostMeteringForMessages, getTotalTokenMeteringForMessages, + getTotalUsdMeteringForMessages, } from "../utils/getMetering"; import { useAppSelector } from "./useAppSelector"; @@ -14,3 +15,8 @@ export const useTotalTokenMeteringForChat = () => { const messages = useAppSelector(selectMessages); return getTotalTokenMeteringForMessages(messages); }; + +export const useTotalUsdForChat = () => { + const messages = useAppSelector(selectMessages); + return getTotalUsdMeteringForMessages(messages); +}; diff --git a/refact-agent/gui/src/hooks/useTrajectoryOps.ts b/refact-agent/gui/src/hooks/useTrajectoryOps.ts new file mode 100644 index 000000000..2b6db9f03 --- /dev/null +++ b/refact-agent/gui/src/hooks/useTrajectoryOps.ts @@ -0,0 +1,220 @@ +import { useState, useCallback } from "react"; +import { useAppDispatch, useAppSelector } from "./index"; +import { selectChatId, selectThread } from "../features/Chat"; +import { + usePreviewTransformMutation, + useApplyTransformMutation, + usePreviewHandoffMutation, + useApplyHandoffMutation, + TransformOptions, + HandoffOptions, + TransformPreviewResponse, + HandoffPreviewResponse, +} from "../services/refact/trajectory"; +import { trajectoriesApi } from "../services/refact/trajectories"; +import { + createChatWithId, + requestSseRefresh, + closeThread, +} from "../features/Chat/Thread/actions"; +import { push } from "../features/Pages/pagesSlice"; +import { selectLspPort, selectApiKey } from "../features/Config/configSlice"; +import { regenerate } from "../services/refact/chatCommands"; + +export type TrajectoryTab = "compress" | "handoff"; + +export function useTrajectoryOps() { + const dispatch = useAppDispatch(); + const chatId = useAppSelector(selectChatId); + const thread = useAppSelector(selectThread); + const port = useAppSelector(selectLspPort); + const apiKey = useAppSelector(selectApiKey); + + const [activeTab, setActiveTab] = useState<TrajectoryTab>("compress"); + const [transformOptions, setTransformOptions] = useState<TransformOptions>({ + dedup_and_compress_context: true, + drop_all_context: false, + compress_non_agentic_tools: true, + drop_all_memories: false, + drop_project_information: false, + }); + const [handoffOptions, setHandoffOptions] = useState<HandoffOptions>({ + include_last_user_plus: false, + include_all_opened_context: false, + include_all_edited_context: false, + include_agentic_tools: false, + llm_summary_for_excluded: true, + include_all_user_assistant_only: false, + }); + + const [transformPreview, setTransformPreview] = + useState<TransformPreviewResponse | 
null>(null); + const [handoffPreview, setHandoffPreview] = + useState<HandoffPreviewResponse | null>(null); + + const [previewTransform, { isLoading: isPreviewingTransform }] = + usePreviewTransformMutation(); + const [applyTransform, { isLoading: isApplyingTransform }] = + useApplyTransformMutation(); + const [previewHandoff, { isLoading: isPreviewingHandoff }] = + usePreviewHandoffMutation(); + const [applyHandoff, { isLoading: isApplyingHandoff }] = + useApplyHandoffMutation(); + + const handlePreviewTransform = useCallback(async () => { + if (!chatId) return; + try { + const result = await previewTransform({ + chatId, + options: transformOptions, + }).unwrap(); + setTransformPreview(result); + } catch { + setTransformPreview(null); + } + }, [chatId, transformOptions, previewTransform]); + + const handleApplyTransform = useCallback(async () => { + if (!chatId) return false; + try { + await applyTransform({ chatId, options: transformOptions }).unwrap(); + setTransformPreview(null); + dispatch(requestSseRefresh({ chatId })); + return true; + } catch { + return false; + } + }, [chatId, transformOptions, applyTransform, dispatch]); + + const handlePreviewHandoff = useCallback(async () => { + if (!chatId) return; + try { + const result = await previewHandoff({ + chatId, + options: handoffOptions, + }).unwrap(); + setHandoffPreview(result); + } catch { + setHandoffPreview(null); + } + }, [chatId, handoffOptions, previewHandoff]); + + const handleApplyHandoff = useCallback(async () => { + if (!chatId || !thread) return false; + try { + const isTaskChat = thread.is_task_chat; + const taskMeta = thread.task_meta; + const oldChatId = chatId; + + const result = await applyHandoff({ + chatId, + options: handoffOptions, + }).unwrap(); + + await dispatch( + trajectoriesApi.endpoints.listAllTrajectories.initiate(undefined, { + forceRefetch: true, + }), + ); + + if (isTaskChat && taskMeta?.role === "planner") { + const taskId = taskMeta.task_id; + const now = new Date().toISOString(); + + dispatch(closeThread({ id: oldChatId, force: true })); + + dispatch( + createChatWithId({ + id: result.new_chat_id, + title: "", + isTaskChat: true, + mode: "TASK_PLANNER", + taskMeta: { task_id: taskId, role: "planner" }, + }), + ); + + const { addPlannerChat, setTaskActiveChat } = await import( + "../features/Tasks/tasksSlice" + ); + + dispatch( + addPlannerChat({ + taskId, + planner: { + id: result.new_chat_id, + title: "", + createdAt: now, + updatedAt: now, + }, + }), + ); + + dispatch( + setTaskActiveChat({ + taskId, + activeChat: { type: "planner", chatId: result.new_chat_id }, + }), + ); + + dispatch(requestSseRefresh({ chatId: result.new_chat_id })); + setHandoffPreview(null); + await regenerate(result.new_chat_id, port, apiKey ?? undefined); + } else { + dispatch(closeThread({ id: oldChatId, force: true })); + dispatch(createChatWithId({ id: result.new_chat_id })); + dispatch(requestSseRefresh({ chatId: result.new_chat_id })); + dispatch(push({ name: "chat" })); + setHandoffPreview(null); + await regenerate(result.new_chat_id, port, apiKey ?? 
undefined); + } + + return true; + } catch (error) { + // eslint-disable-next-line no-console + console.error("[handleApplyHandoff]", error); + return false; + } + }, [chatId, thread, handoffOptions, applyHandoff, dispatch, port, apiKey]); + + const clearPreviews = useCallback(() => { + setTransformPreview(null); + setHandoffPreview(null); + }, []); + + const updateTransformOption = useCallback( + (key: keyof TransformOptions, value: boolean) => { + setTransformOptions((prev) => ({ ...prev, [key]: value })); + setTransformPreview(null); + }, + [], + ); + + const updateHandoffOption = useCallback( + (key: keyof HandoffOptions, value: boolean) => { + setHandoffOptions((prev) => ({ ...prev, [key]: value })); + setHandoffPreview(null); + }, + [], + ); + + return { + chatId, + activeTab, + setActiveTab, + transformOptions, + handoffOptions, + transformPreview, + handoffPreview, + isPreviewingTransform, + isApplyingTransform, + isPreviewingHandoff, + isApplyingHandoff, + handlePreviewTransform, + handleApplyTransform, + handlePreviewHandoff, + handleApplyHandoff, + clearPreviews, + updateTransformOption, + updateHandoffOption, + }; +} diff --git a/refact-agent/gui/src/hooks/useVoiceInput.ts b/refact-agent/gui/src/hooks/useVoiceInput.ts new file mode 100644 index 000000000..4cd4697c8 --- /dev/null +++ b/refact-agent/gui/src/hooks/useVoiceInput.ts @@ -0,0 +1,128 @@ +import { useState, useCallback, useEffect } from "react"; +import { useStreamingVoiceRecording } from "./useStreamingVoiceRecording"; +import { + getVoiceStatus, + downloadVoiceModel, + VoiceStatusResponse, +} from "../services/refact/voice"; +import { useConfig } from "./useConfig"; + +export interface UseVoiceInputResult { + isRecording: boolean; + isFinishing: boolean; + isVoiceActive: boolean; + isDownloading: boolean; + downloadProgress: number; + error: string | null; + voiceEnabled: boolean; + modelLoaded: boolean; + liveTranscript: string; + toggleRecording: () => Promise<string | null>; + cancelRecording: () => void; +} + +export function useVoiceInput( + onTranscript: (text: string) => void, +): UseVoiceInputResult { + const config = useConfig(); + const port = config.lspPort; + const isJetBrains = config.host === "jetbrains"; + const { + isRecording, + isFinishing, + transcript, + error: recordingError, + startRecording, + stopRecording, + cancelRecording, + } = useStreamingVoiceRecording(port); + const [error, setError] = useState<string | null>(null); + const [status, setStatus] = useState<VoiceStatusResponse | null>(null); + + useEffect(() => { + if (recordingError) { + setError(recordingError); + } + }, [recordingError]); + + useEffect(() => { + if (isJetBrains) { + setStatus({ + enabled: false, + model_loaded: false, + model_name: "", + is_downloading: false, + download_progress: 0, + }); + return; + } + getVoiceStatus(port) + .then(setStatus) + .catch(() => setStatus(null)); + }, [port, isJetBrains]); + + useEffect(() => { + if (!status?.is_downloading) return; + + const interval = setInterval(() => { + getVoiceStatus(port) + .then(setStatus) + .catch(() => { + // Silently ignore errors during polling + }); + }, 1000); + + return () => clearInterval(interval); + }, [status?.is_downloading, port]); + + const toggleRecording = useCallback(async (): Promise<string | null> => { + setError(null); + + if (isRecording) { + try { + const finalText = await stopRecording(); + const trimmed = finalText.trim(); + if (trimmed) { + onTranscript(trimmed); + return trimmed; + } + return null; + } catch (err) { + const message = + err 
instanceof Error ? err.message : "Failed to get transcript"; + setError(message); + return null; + } + } else { + try { + await startRecording(); + } catch (err) { + const message = + err instanceof Error ? err.message : "Failed to start recording"; + if (message.includes("Model not downloaded")) { + downloadVoiceModel(port).catch(() => { + // Silently ignore download errors + }); + const newStatus = await getVoiceStatus(port).catch(() => null); + if (newStatus) setStatus(newStatus); + } + setError(message); + } + return null; + } + }, [isRecording, startRecording, stopRecording, onTranscript, port]); + + return { + isRecording, + isFinishing, + isVoiceActive: isRecording || isFinishing, + isDownloading: status?.is_downloading ?? false, + downloadProgress: status?.download_progress ?? 0, + error, + voiceEnabled: status?.enabled ?? false, + modelLoaded: status?.model_loaded ?? false, + liveTranscript: transcript, + toggleRecording, + cancelRecording, + }; +} diff --git a/refact-agent/gui/src/hooks/useVoiceRecording.ts b/refact-agent/gui/src/hooks/useVoiceRecording.ts new file mode 100644 index 000000000..8bd575f9e --- /dev/null +++ b/refact-agent/gui/src/hooks/useVoiceRecording.ts @@ -0,0 +1,216 @@ +/* eslint-disable @typescript-eslint/no-unnecessary-condition, no-console */ +import { useState, useRef, useCallback } from "react"; + +export interface UseVoiceRecordingResult { + isRecording: boolean; + isProcessing: boolean; + error: string | null; + startRecording: () => Promise<void>; + stopRecording: () => Promise<Blob | null>; + toggleRecording: () => Promise<Blob | null>; +} + +async function convertToWav(blob: Blob): Promise<Blob> { + const arrayBuffer = await blob.arrayBuffer(); + const audioContext = new AudioContext({ sampleRate: 16000 }); + const audioBuffer = await audioContext.decodeAudioData(arrayBuffer); + + const numChannels = 1; + const sampleRate = 16000; + const bitsPerSample = 16; + + let monoData: Float32Array; + if (audioBuffer.numberOfChannels === 1) { + monoData = audioBuffer.getChannelData(0); + } else { + const left = audioBuffer.getChannelData(0); + const right = audioBuffer.getChannelData(1); + monoData = new Float32Array(left.length); + for (let i = 0; i < left.length; i++) { + monoData[i] = (left[i] + right[i]) / 2; + } + } + + const numSamples = monoData.length; + const dataSize = numSamples * numChannels * (bitsPerSample / 8); + const buffer = new ArrayBuffer(44 + dataSize); + const view = new DataView(buffer); + + const writeString = (offset: number, str: string) => { + for (let i = 0; i < str.length; i++) { + view.setUint8(offset + i, str.charCodeAt(i)); + } + }; + + writeString(0, "RIFF"); + view.setUint32(4, 36 + dataSize, true); + writeString(8, "WAVE"); + writeString(12, "fmt "); + view.setUint32(16, 16, true); + view.setUint16(20, 1, true); + view.setUint16(22, numChannels, true); + view.setUint32(24, sampleRate, true); + view.setUint32(28, sampleRate * numChannels * (bitsPerSample / 8), true); + view.setUint16(32, numChannels * (bitsPerSample / 8), true); + view.setUint16(34, bitsPerSample, true); + writeString(36, "data"); + view.setUint32(40, dataSize, true); + + let offset = 44; + for (let i = 0; i < numSamples; i++) { + const sample = Math.max(-1, Math.min(1, monoData[i])); + view.setInt16(offset, sample < 0 ? 
sample * 0x8000 : sample * 0x7fff, true); + offset += 2; + } + + await audioContext.close(); + return new Blob([buffer], { type: "audio/wav" }); +} + +export function useVoiceRecording(): UseVoiceRecordingResult { + const [isRecording, setIsRecording] = useState(false); + const [isProcessing] = useState(false); + const [error, setError] = useState<string | null>(null); + + const mediaRecorderRef = useRef<MediaRecorder | null>(null); + const chunksRef = useRef<Blob[]>([]); + const streamRef = useRef<MediaStream | null>(null); + + const startRecording = useCallback(async () => { + setError(null); + chunksRef.current = []; + + if (!navigator.mediaDevices?.getUserMedia) { + setError("Microphone not supported in this browser"); + return; + } + + try { + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + console.log( + "Stream tracks:", + stream.getAudioTracks().map((t) => ({ + label: t.label, + enabled: t.enabled, + muted: t.muted, + readyState: t.readyState, + settings: t.getSettings(), + })), + ); + streamRef.current = stream; + + const audioTracks = stream.getAudioTracks(); + if (audioTracks.length === 0) { + setError("No audio track in stream"); + return; + } + + const track = audioTracks[0]; + if (track.muted) { + console.warn("Audio track is muted"); + } + + const mimeType = MediaRecorder.isTypeSupported("audio/ogg;codecs=opus") + ? "audio/ogg;codecs=opus" + : MediaRecorder.isTypeSupported("audio/webm;codecs=opus") + ? "audio/webm;codecs=opus" + : MediaRecorder.isTypeSupported("audio/webm") + ? "audio/webm" + : "audio/mp4"; + + const mediaRecorder = new MediaRecorder(stream, { mimeType }); + mediaRecorderRef.current = mediaRecorder; + + mediaRecorder.ondataavailable = (event) => { + if (event.data.size > 0) { + chunksRef.current.push(event.data); + } + }; + + mediaRecorder.onerror = (event) => { + console.error("MediaRecorder error:", event); + setError("Recording error"); + }; + + mediaRecorder.start(); + setIsRecording(true); + } catch (err) { + console.error("Failed to start recording:", err); + let message = "Failed to start recording"; + if (err instanceof Error) { + if (err.name === "NotFoundError") { + message = "No microphone found. Please connect a microphone."; + } else if (err.name === "NotAllowedError") { + message = "Microphone access denied. Please allow microphone access."; + } else if (err.name === "NotReadableError") { + message = "Microphone is in use by another application."; + } else { + message = err.message; + } + } + setError(message); + } + }, []); + + const stopRecording = useCallback(async (): Promise<Blob | null> => { + return new Promise((resolve) => { + const mediaRecorder = mediaRecorderRef.current; + if (!mediaRecorder || mediaRecorder.state === "inactive") { + setIsRecording(false); + resolve(null); + return; + } + + mediaRecorder.ondataavailable = (event) => { + if (event.data.size > 0) { + chunksRef.current.push(event.data); + } + }; + + mediaRecorder.onstop = () => { + const mimeType = mediaRecorder.mimeType; + const blob = new Blob(chunksRef.current, { type: mimeType }); + chunksRef.current = []; + + if (streamRef.current) { + streamRef.current.getTracks().forEach((track) => track.stop()); + streamRef.current = null; + } + + setIsRecording(false); + + if (blob.size === 0) { + setError("No audio recorded. 
Please try again."); + resolve(null); + } else { + convertToWav(blob) + .then((wavBlob) => resolve(wavBlob)) + .catch((err) => { + console.error("WAV conversion failed:", err); + resolve(blob); + }); + } + }; + + mediaRecorder.stop(); + }); + }, []); + + const toggleRecording = useCallback(async (): Promise<Blob | null> => { + if (isRecording) { + return stopRecording(); + } else { + await startRecording(); + return null; + } + }, [isRecording, startRecording, stopRecording]); + + return { + isRecording, + isProcessing, + error, + startRecording, + stopRecording, + toggleRecording, + }; +} diff --git a/refact-agent/gui/src/images/RefactIcon.tsx b/refact-agent/gui/src/images/RefactIcon.tsx new file mode 100644 index 000000000..66303bbb3 --- /dev/null +++ b/refact-agent/gui/src/images/RefactIcon.tsx @@ -0,0 +1,23 @@ +import type { FC, SVGAttributes } from "react"; + +export const RefactIcon: FC<SVGAttributes<SVGElement>> = (props) => { + return ( + <svg + width="15" + height="15" + viewBox="200 180 400 480" + fill="none" + xmlns="http://www.w3.org/2000/svg" + {...props} + > + <path + d="M527.46 573.548C510.073 573.548 494.568 570.209 480.948 563.531C467.328 557.143 456.605 547.562 448.781 534.786C441.246 522.011 437.479 506.332 437.479 487.749L437.479 449.859C437.479 441.729 434.726 435.196 429.22 430.26C424.004 425.034 416.904 421.985 407.92 421.114L407.92 378.868C416.904 378.287 424.004 375.238 429.22 369.722C434.726 363.915 437.479 357.237 437.479 349.688L437.479 312.668C437.479 294.376 441.391 278.987 449.216 266.502C457.04 253.727 467.762 244 481.383 237.322C495.003 230.353 510.362 226.869 527.46 226.869L547.891 226.869L547.891 273.47H535.285C523.693 273.47 514.419 277.245 507.464 284.794C500.509 292.343 497.032 303.086 497.032 317.023L497.032 344.026C497.032 361.447 492.105 375.384 482.252 385.836C472.689 395.999 460.518 403.112 445.738 407.177L446.173 391.934C460.952 396.289 473.124 403.838 482.687 414.581C492.25 425.034 497.032 438.68 497.032 455.52L497.032 483.394C497.032 497.621 500.509 508.509 507.464 516.059C514.419 523.317 523.693 526.947 535.285 526.947H547.891L547.891 573.548H527.46Z" + fill="currentColor" + /> + <path + d="M253 573.55L253 226L312.118 226L312.118 573.55L253 573.55ZM272.996 573.55L272.996 526.949L372.106 526.949L372.106 573.55L272.996 573.55ZM272.996 272.601L272.996 226L372.106 226L372.106 272.601L272.996 272.601Z" + fill="currentColor" + /> + </svg> + ); +}; diff --git a/refact-agent/gui/src/images/index.ts b/refact-agent/gui/src/images/index.ts index e44699618..7835d607a 100644 --- a/refact-agent/gui/src/images/index.ts +++ b/refact-agent/gui/src/images/index.ts @@ -1,2 +1,3 @@ export * from "./coin"; export * from "./linkIcon"; +export * from "./RefactIcon"; diff --git a/refact-agent/gui/src/services/refact/chat.ts b/refact-agent/gui/src/services/refact/chat.ts index 4f0dfd043..afb3dbf25 100644 --- a/refact-agent/gui/src/services/refact/chat.ts +++ b/refact-agent/gui/src/services/refact/chat.ts @@ -1,24 +1,23 @@ -import { IntegrationMeta, LspChatMode } from "../../features/Chat"; -import { CHAT_URL } from "./consts"; -// import { ToolCommand } from "./tools"; import { ChatRole, ThinkingBlock, ToolCall, ToolResult, UserMessage, + isToolContent, } from "./types"; -export const DEFAULT_MAX_NEW_TOKENS = null; - export type LspChatMessage = | { role: ChatRole; - // TODO make this a union type for user message content: string | null; - finish_reason?: "stop" | "length" | "abort" | "tool_calls" | null; - // TBD: why was index omitted ? 
- // tool_calls?: Omit<ToolCall, "index">[]; + finish_reason?: + | "stop" + | "length" + | "abort" + | "tool_calls" + | "error" + | null; thinking_blocks?: ThinkingBlock[]; tool_calls?: ToolCall[]; tool_call_id?: string; @@ -27,15 +26,31 @@ export type LspChatMessage = | UserMessage | { role: "tool"; content: ToolResult["content"]; tool_call_id: string }; -// could be more narrow. export function isLspChatMessage(json: unknown): json is LspChatMessage { if (!json) return false; if (typeof json !== "object") return false; if (!("role" in json)) return false; if (typeof json.role !== "string") return false; + + const role = json.role; + + if (role === "tool") { + if (!("tool_call_id" in json)) return false; + if (!("content" in json)) return false; + return isToolContent(json.content); + } + + if (role === "diff") { + if (!("content" in json)) return false; + return Array.isArray(json.content); + } + if (!("content" in json)) return false; - if (json.content !== null && typeof json.content !== "string") return false; - return true; + if (json.content === null) return true; + if (typeof json.content === "string") return true; + if (Array.isArray(json.content)) return true; + + return false; } export function isLspUserMessage( @@ -44,82 +59,6 @@ export function isLspUserMessage( return message.role === "user"; } -type StreamArgs = - | { - stream: true; - abortSignal: AbortSignal; - } - | { stream: false; abortSignal?: undefined | AbortSignal }; - -type SendChatArgs = { - messages: LspChatMessage[]; - last_user_message_id?: string; // used for `refact-message-id` header - model: string; - lspUrl?: string; - takeNote?: boolean; - onlyDeterministicMessages?: boolean; - chatId?: string; - port?: number; - apiKey?: string | null; - // isConfig?: boolean; - toolsConfirmed?: boolean; - checkpointsEnabled?: boolean; - integration?: IntegrationMeta | null; - mode?: LspChatMode; // used for chat actions - boost_reasoning?: boolean; - increase_max_tokens?: boolean; - include_project_info?: boolean; - context_tokens_cap?: number; - use_compression?: boolean; -} & StreamArgs; - -type GetChatTitleArgs = { - messages: LspChatMessage[]; - model: string; - lspUrl?: string; - takeNote?: boolean; - onlyDeterministicMessages?: boolean; - chatId?: string; - port?: number; - apiKey?: string | null; - boost_reasoning?: boolean; -} & StreamArgs; - -export type GetChatTitleResponse = { - choices: Choice[]; - created: number; - deterministic_messages: DeterministicMessage[]; - id: string; - metering_balance: number; - model: string; - object: string; - system_fingerprint: string; - usage: Usage; -}; - -export type GetChatTitleActionPayload = { - chatId: string; - title: string; -}; - -export type Choice = { - finish_reason: string; - index: number; - message: Message; -}; - -export type Message = { - content: string; - role: string; -}; - -export type DeterministicMessage = { - content: string; - role: string; - tool_call_id: string; - usage: unknown; -}; - export type CompletionTokenDetails = { accepted_prediction_tokens: number | null; audio_tokens: number | null; @@ -132,6 +71,14 @@ export type PromptTokenDetails = { cached_tokens: number; }; +export type MeteringUsd = { + prompt_usd: number; + generated_usd: number; + cache_read_usd?: number; + cache_creation_usd?: number; + total_usd: number; +}; + export type Usage = { completion_tokens: number; prompt_tokens: number; @@ -140,121 +87,26 @@ export type Usage = { prompt_tokens_details?: PromptTokenDetails | null; cache_creation_input_tokens?: number; 
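+ // Optional prompt-cache token counts (cache creation / cache read).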
cache_read_input_tokens?: number; + metering_usd?: MeteringUsd; }; -// TODO: add config url -export async function sendChat({ - messages, - model, - abortSignal, - stream, - // lspUrl, - // takeNote = false, - onlyDeterministicMessages: only_deterministic_messages, - chatId: chat_id, - port = 8001, - apiKey, - checkpointsEnabled = true, - // isConfig = false, - integration, - last_user_message_id = "", - mode, - boost_reasoning, - increase_max_tokens = false, - include_project_info, - context_tokens_cap, - use_compression, -}: SendChatArgs): Promise<Response> { - // const toolsResponse = await getAvailableTools(); - - // const tools = takeNote - // ? toolsResponse.filter( - // (tool) => tool.function.name === "remember_how_to_use_tools", - // ) - // : toolsResponse.filter( - // (tool) => tool.function.name !== "remember_how_to_use_tools", - // ); - - const body = JSON.stringify({ - messages, - model: model, - stream, - only_deterministic_messages, - checkpoints_enabled: checkpointsEnabled, - // chat_id, - parameters: boost_reasoning ? { boost_reasoning: true } : undefined, - increase_max_tokens: increase_max_tokens, - meta: { - chat_id, - request_attempt_id: last_user_message_id, - // chat_remote, - // TODO: pass this through - chat_mode: mode ?? "EXPLORE", - // chat_mode: "EXPLORE", // NOTOOLS, EXPLORE, AGENT, CONFIGURE, PROJECTSUMMARY, - // TODO: not clear, that if we set integration.path it's going to be set also in meta as current_config_file - ...(integration?.path ? { current_config_file: integration.path } : {}), - ...(include_project_info !== undefined ? { include_project_info } : {}), - ...(context_tokens_cap !== undefined ? { context_tokens_cap } : {}), - ...(use_compression !== undefined ? { use_compression } : {}), - }, - }); - - // const apiKey = getApiKey(); - const headers = { - "Content-Type": "application/json", - ...(apiKey ? { Authorization: "Bearer " + apiKey } : {}), - }; - - const url = `http://127.0.0.1:${port}${CHAT_URL}`; - - return fetch(url, { - method: "POST", - headers, - body, - redirect: "follow", - cache: "no-cache", - // TODO: causes an error during tests :/ - // referrer: "no-referrer", - signal: abortSignal, - credentials: "same-origin", - }); -} - -export async function generateChatTitle({ - messages, - stream, - model, - onlyDeterministicMessages: only_deterministic_messages, - chatId: chat_id, - port = 8001, - apiKey, -}: GetChatTitleArgs): Promise<Response> { - const body = JSON.stringify({ - messages, - model, - stream, - max_tokens: 300, - only_deterministic_messages: only_deterministic_messages, - chat_id, - // NOTE: we don't want to use reasoning here, for example Anthropic requires at least max_tokens=1024 for thinking - // parameters: boost_reasoning ? { boost_reasoning: true } : undefined, - }); - - const headers = { - "Content-Type": "application/json", - ...(apiKey ? 
{ Authorization: "Bearer " + apiKey } : {}), - }; +export type TokenMapSegment = { + label: string; + category: string; + tokens: number; + percentage: number; +}; - const url = `http://127.0.0.1:${port}${CHAT_URL}`; +export type TokenMapItem = { + category: string; + label: string; + tokens: number; +}; - return fetch(url, { - method: "POST", - headers, - body, - redirect: "follow", - cache: "no-cache", - // TODO: causes an error during tests :/ - // referrer: "no-referrer", - credentials: "same-origin", - }); -} +export type TokenMap = { + total_prompt_tokens: number; + max_context_tokens: number; + estimated: boolean; + segments: TokenMapSegment[]; + top_items: TokenMapItem[]; +}; diff --git a/refact-agent/gui/src/services/refact/chatCommands.ts b/refact-agent/gui/src/services/refact/chatCommands.ts new file mode 100644 index 000000000..839b6b1bb --- /dev/null +++ b/refact-agent/gui/src/services/refact/chatCommands.ts @@ -0,0 +1,256 @@ +import { v4 as uuidv4 } from "uuid"; + +export type MessageContent = + | string + | ( + | { type: "text"; text: string } + | { type: "image_url"; image_url: { url: string } } + )[]; + +export type ChatCommandBase = + | { + type: "user_message"; + content: MessageContent; + attachments?: unknown[]; + } + | { + type: "retry_from_index"; + index: number; + content?: MessageContent; + attachments?: unknown[]; + } + | { + type: "set_params"; + patch: Record<string, unknown>; + } + | { + type: "abort"; + } + | { + type: "tool_decision"; + tool_call_id: string; + accepted: boolean; + } + | { + type: "tool_decisions"; + decisions: { tool_call_id: string; accepted: boolean }[]; + } + | { + type: "ide_tool_result"; + tool_call_id: string; + content: string; + tool_failed: boolean; + } + | { + type: "update_message"; + message_id: string; + content: MessageContent; + attachments?: unknown[]; + regenerate?: boolean; + } + | { + type: "remove_message"; + message_id: string; + regenerate?: boolean; + } + | { + type: "regenerate"; + } + | { + type: "branch_from_chat"; + source_chat_id: string; + up_to_message_id: string; + }; + +export type ChatCommand = ChatCommandBase & { + client_request_id: string; + priority?: boolean; +}; + +export async function sendChatCommand( + chatId: string, + port: number, + apiKey: string | undefined, + command: ChatCommandBase, + priority?: boolean, +): Promise<void> { + const commandWithId: ChatCommand = { + ...command, + client_request_id: uuidv4(), + priority, + }; + + const url = `http://127.0.0.1:${port}/v1/chats/${encodeURIComponent( + chatId, + )}/commands`; + + const headers: Record<string, string> = { + "Content-Type": "application/json", + }; + + if (apiKey) { + headers.Authorization = `Bearer ${apiKey}`; + } + + const response = await fetch(url, { + method: "POST", + headers, + body: JSON.stringify(commandWithId), + }); + + if (!response.ok) { + const text = await response.text(); + throw new Error( + `Failed to send command: ${response.status} ${response.statusText} - ${text}`, + ); + } +} + +export async function sendUserMessage( + chatId: string, + content: MessageContent, + port: number, + apiKey?: string, + priority?: boolean, +): Promise<void> { + await sendChatCommand( + chatId, + port, + apiKey, + { type: "user_message", content }, + priority, + ); +} + +export async function retryFromIndex( + chatId: string, + index: number, + content: MessageContent, + port: number, + apiKey?: string, +): Promise<void> { + await sendChatCommand(chatId, port, apiKey, { + type: "retry_from_index", + index, + content, + } as 
ChatCommandBase); +} + +export async function regenerate( + chatId: string, + port: number, + apiKey?: string, +): Promise<void> { + await sendChatCommand(chatId, port, apiKey, { + type: "regenerate", + } as ChatCommandBase); +} + +export async function updateChatParams( + chatId: string, + params: Record<string, unknown>, + port: number, + apiKey?: string, +): Promise<void> { + await sendChatCommand(chatId, port, apiKey, { + type: "set_params", + patch: params, + } as ChatCommandBase); +} + +export async function abortGeneration( + chatId: string, + port: number, + apiKey?: string, +): Promise<void> { + await sendChatCommand(chatId, port, apiKey, { + type: "abort", + } as ChatCommandBase); +} + +export async function respondToToolConfirmation( + chatId: string, + toolCallId: string, + accepted: boolean, + port: number, + apiKey?: string, +): Promise<void> { + await sendChatCommand(chatId, port, apiKey, { + type: "tool_decision", + tool_call_id: toolCallId, + accepted, + } as ChatCommandBase); +} + +export async function respondToToolConfirmations( + chatId: string, + decisions: { tool_call_id: string; accepted: boolean }[], + port: number, + apiKey?: string, +): Promise<void> { + await sendChatCommand(chatId, port, apiKey, { + type: "tool_decisions", + decisions, + } as ChatCommandBase); +} + +export async function updateMessage( + chatId: string, + messageId: string, + content: MessageContent, + port: number, + apiKey?: string, + regenerate?: boolean, +): Promise<void> { + await sendChatCommand(chatId, port, apiKey, { + type: "update_message", + message_id: messageId, + content, + regenerate, + } as ChatCommandBase); +} + +export async function removeMessage( + chatId: string, + messageId: string, + port: number, + apiKey?: string, + regenerate?: boolean, +): Promise<void> { + await sendChatCommand(chatId, port, apiKey, { + type: "remove_message", + message_id: messageId, + regenerate, + } as ChatCommandBase); +} + +export async function branchFromChat( + targetChatId: string, + sourceChatId: string, + upToMessageId: string, + port: number, + apiKey?: string, +): Promise<void> { + await sendChatCommand(targetChatId, port, apiKey, { + type: "branch_from_chat", + source_chat_id: sourceChatId, + up_to_message_id: upToMessageId, + } as ChatCommandBase); +} + +export async function cancelQueuedItem( + chatId: string, + clientRequestId: string, + port: number, + apiKey?: string, +): Promise<boolean> { + const url = `http://127.0.0.1:${port}/v1/chats/${encodeURIComponent( + chatId, + )}/queue/${encodeURIComponent(clientRequestId)}`; + const headers: Record<string, string> = {}; + if (apiKey) { + headers.Authorization = `Bearer ${apiKey}`; + } + const response = await fetch(url, { method: "DELETE", headers }); + return response.ok; +} diff --git a/refact-agent/gui/src/services/refact/chatModes.ts b/refact-agent/gui/src/services/refact/chatModes.ts new file mode 100644 index 000000000..ead0200b4 --- /dev/null +++ b/refact-agent/gui/src/services/refact/chatModes.ts @@ -0,0 +1,72 @@ +import { RootState } from "../../app/store"; +import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; + +export type ChatModeThreadDefaults = { + include_project_info: boolean; + checkpoints_enabled: boolean; + auto_approve_editing_tools: boolean; + auto_approve_dangerous_commands: boolean; +}; + +export type ChatModeUi = { + order: number; + tags: string[]; +}; + +export type ChatModeInfo = { + id: string; + title: string; + description: string; + tools_count: number; + thread_defaults: 
ChatModeThreadDefaults; + ui: ChatModeUi; +}; + +export type ChatModeError = { + file_path: string; + error: string; +}; + +export type ChatModesResponse = { + modes: ChatModeInfo[]; + errors: ChatModeError[]; +}; + +export const chatModesApi = createApi({ + reducerPath: "chatModes", + baseQuery: fetchBaseQuery({ + prepareHeaders: (headers, { getState }) => { + const token = (getState() as RootState).config.apiKey; + if (token) { + headers.set("Authorization", `Bearer ${token}`); + } + return headers; + }, + }), + endpoints: (builder) => ({ + getChatModes: builder.query<ChatModesResponse, undefined>({ + queryFn: async (_args, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/chat-modes`, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + return { data: result.data as ChatModesResponse }; + }, + }), + }), + refetchOnMountOrArgChange: true, +}); + +export const { useGetChatModesQuery } = chatModesApi; diff --git a/refact-agent/gui/src/services/refact/chatSubscription.ts b/refact-agent/gui/src/services/refact/chatSubscription.ts new file mode 100644 index 000000000..1b8495439 --- /dev/null +++ b/refact-agent/gui/src/services/refact/chatSubscription.ts @@ -0,0 +1,535 @@ +import type { ChatMessage } from "./types"; + +export type SessionState = + | "idle" + | "generating" + | "executing_tools" + | "paused" + | "waiting_ide" + | "waiting_user_input" + | "completed" + | "error"; + +export type ThreadParams = { + id: string; + title: string; + model: string; + mode: string; + tool_use: string; + boost_reasoning: boolean; + context_tokens_cap: number | null; + include_project_info: boolean; + checkpoints_enabled: boolean; + is_title_generated: boolean; + use_compression?: boolean; + auto_approve_editing_tools?: boolean; + auto_approve_dangerous_commands?: boolean; + reasoning_effort?: string | null; + thinking_budget?: number | null; + temperature?: number | null; + frequency_penalty?: number | null; + max_tokens?: number | null; + parallel_tool_calls?: boolean | null; + task_meta?: { + task_id: string; + role: string; + agent_id?: string; + card_id?: string; + }; + + // OpenAI Responses API stateful multi-turn + previous_response_id?: string; +}; + +export type PauseReason = { + type: string; + tool_name: string; + command: string; + rule: string; + tool_call_id: string; + integr_config_path: string | null; +}; + +export type QueuedItem = { + client_request_id: string; + priority: boolean; + command_type: string; + preview: string; +}; + +export type RuntimeState = { + state: SessionState; + paused: boolean; + error: string | null; + queue_size: number; + pause_reasons: PauseReason[]; + queued_items: QueuedItem[]; +}; + +export type DeltaOp = + | { op: "append_content"; text: string } + | { op: "append_reasoning"; text: string } + | { op: "set_tool_calls"; tool_calls: unknown[] } + | { op: "set_thinking_blocks"; blocks: unknown[] } + | { op: "add_citation"; citation: unknown } + | { op: "add_server_content_block"; block: unknown } + | { op: "set_usage"; usage: unknown } + | { op: "merge_extra"; extra: Record<string, unknown> }; + +export type EventEnvelope = + | { + chat_id: string; + seq: string; + type: "snapshot"; + thread: ThreadParams; + runtime: RuntimeState; + messages: ChatMessage[]; + } + | { + 
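+ // "thread_updated": changed thread params are carried as extra keys (shape left open via the index signature below).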
chat_id: string; + seq: string; + type: "thread_updated"; + [key: string]: unknown; + } + | { + chat_id: string; + seq: string; + type: "message_added"; + message: ChatMessage; + index: number; + } + | { + chat_id: string; + seq: string; + type: "message_updated"; + message_id: string; + message: ChatMessage; + } + | { + chat_id: string; + seq: string; + type: "message_removed"; + message_id: string; + } + | { + chat_id: string; + seq: string; + type: "messages_truncated"; + from_index: number; + } + | { + chat_id: string; + seq: string; + type: "stream_started"; + message_id: string; + } + | { + chat_id: string; + seq: string; + type: "stream_delta"; + message_id: string; + ops: DeltaOp[]; + } + | { + chat_id: string; + seq: string; + type: "stream_finished"; + message_id: string; + finish_reason: string | null; + } + | { + chat_id: string; + seq: string; + type: "pause_required"; + reasons: PauseReason[]; + } + | { + chat_id: string; + seq: string; + type: "pause_cleared"; + } + | { + chat_id: string; + seq: string; + type: "ide_tool_required"; + tool_call_id: string; + tool_name: string; + args: unknown; + } + | { + chat_id: string; + seq: string; + type: "subchat_update"; + tool_call_id: string; + subchat_id: string; + attached_files?: string[]; + } + | { + chat_id: string; + seq: string; + type: "ack"; + client_request_id: string; + accepted: boolean; + result: unknown; + } + | { + chat_id: string; + seq: string; + type: "queue_updated"; + queue_size: number; + queued_items: QueuedItem[]; + } + | { + chat_id: string; + seq: string; + type: "runtime_updated"; + state: string; + error?: string; + }; + +export type ChatEventEnvelope = EventEnvelope; + +export type ChatEventType = EventEnvelope["type"]; + +export type ChatSubscriptionCallbacks = { + onEvent: (event: EventEnvelope) => void; + onError: (error: Error) => void; + onConnected?: () => void; + onDisconnected?: () => void; + onActivity?: () => void; +}; + +export type SubscriptionOptions = { + connectTimeoutMs?: number; + idleTimeoutMs?: number; +}; + +const DEFAULT_CONNECT_TIMEOUT_MS = 15_000; +const DEFAULT_IDLE_TIMEOUT_MS = 45_000; +const MAX_SSE_BUFFER_CHARS = 8_000_000; +const MAX_SSE_EVENT_CHARS = 4_000_000; + +export function subscribeToChatEvents( + chatId: string, + port: number, + callbacks: ChatSubscriptionCallbacks, + apiKey?: string, + options: SubscriptionOptions = {}, +): () => void { + const url = `http://127.0.0.1:${port}/v1/chats/subscribe?chat_id=${encodeURIComponent( + chatId, + )}`; + + const connectTimeoutMs = + options.connectTimeoutMs ?? DEFAULT_CONNECT_TIMEOUT_MS; + const idleTimeoutMs = options.idleTimeoutMs ?? DEFAULT_IDLE_TIMEOUT_MS; + + const abortController = new AbortController(); + const state = { connected: false }; + let abortReason: string | null = null; + let connectTimer: ReturnType<typeof setTimeout> | null = null; + let idleTimer: ReturnType<typeof setTimeout> | null = null; + + const headers: Record<string, string> = {}; + if (apiKey) { + headers.Authorization = `Bearer ${apiKey}`; + } + + const clearTimers = () => { + if (connectTimer) { + clearTimeout(connectTimer); + connectTimer = null; + } + if (idleTimer) { + clearTimeout(idleTimer); + idleTimer = null; + } + }; + + const armIdleTimer = () => { + if (idleTimer) clearTimeout(idleTimer); + idleTimer = setTimeout(() => { + abortReason = abortReason ?? 
"SSE idle timeout"; + abortController.abort(); + }, idleTimeoutMs); + }; + + const disconnect = (notify: boolean) => { + if (state.connected) { + state.connected = false; + if (notify) callbacks.onDisconnected?.(); + } + }; + + connectTimer = setTimeout(() => { + if (!state.connected) { + abortReason = abortReason ?? "SSE connect timeout"; + abortController.abort(); + } + }, connectTimeoutMs); + + void fetch(url, { + method: "GET", + headers, + signal: abortController.signal, + }) + .then(async (response) => { + if (!response.ok) { + throw new Error(`SSE connection failed: ${response.status}`); + } + + if (!response.body) { + throw new Error("Response body is null"); + } + + clearTimers(); + state.connected = true; + callbacks.onConnected?.(); + armIdleTimer(); + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + for (;;) { + const { done, value } = await reader.read(); + if (done) break; + + armIdleTimer(); + callbacks.onActivity?.(); + const chunk = decoder + .decode(value, { stream: true }) + .replace(/\r\n/g, "\n") + .replace(/\r/g, "\n"); + buffer += chunk; + + if (buffer.length > MAX_SSE_BUFFER_CHARS) { + abortReason = `SSE buffer exceeded ${MAX_SSE_BUFFER_CHARS} chars`; + abortController.abort(); + break; + } + + const blocks = buffer.split("\n\n"); + buffer = blocks.pop() ?? ""; + + for (const block of blocks) { + const trimmed = block.trim(); + if (!trimmed) continue; + if (trimmed.startsWith(":")) continue; + + const dataLines: string[] = []; + for (const rawLine of block.split("\n")) { + if (!rawLine.startsWith("data:")) continue; + dataLines.push(rawLine.slice(5).replace(/^\s*/, "")); + } + + if (dataLines.length === 0) continue; + + const dataStr = dataLines.join("\n"); + if (dataStr === "[DONE]") continue; + if (dataStr.length > MAX_SSE_EVENT_CHARS) { + if (process.env.NODE_ENV === "development") { + // eslint-disable-next-line no-console + console.warn( + `[SSE] Event too large (${dataStr.length} chars), skipping`, + ); + } + continue; + } + + try { + const parsed = JSON.parse(dataStr) as unknown; + if (!isValidChatEventBasic(parsed)) { + if (process.env.NODE_ENV === "development") { + // eslint-disable-next-line no-console + console.warn( + "[SSE] Invalid event structure:", + dataStr.slice(0, 200), + ); + } + continue; + } + normalizeSeq(parsed); + if (parsed.chat_id !== chatId) { + continue; + } + callbacks.onEvent(parsed); + } catch (e) { + if (process.env.NODE_ENV === "development") { + // eslint-disable-next-line no-console + console.warn("[SSE] Parse error:", e, dataStr.slice(0, 200)); + } + continue; + } + } + } + + clearTimers(); + if (abortController.signal.aborted) { + if (abortReason) { + callbacks.onError(new Error(abortReason)); + } + abortReason = null; + disconnect(false); + return; + } + disconnect(true); + }) + .catch((err: unknown) => { + clearTimers(); + const error = err as Error; + + if (error.name === "AbortError") { + if (abortReason) { + callbacks.onError(new Error(abortReason)); + } + abortReason = null; + disconnect(true); + return; + } + + callbacks.onError(error); + disconnect(false); + }); + + return () => { + abortReason = null; + clearTimers(); + abortController.abort(); + disconnect(false); + }; +} + +function isValidChatEventBasic(data: unknown): data is EventEnvelope { + if (typeof data !== "object" || data === null) return false; + const obj = data as Record<string, unknown>; + if (typeof obj.chat_id !== "string") return false; + if (typeof obj.seq !== "string" && typeof obj.seq !== 
"number") return false; + if (typeof obj.type !== "string") return false; + return true; +} + +function normalizeSeq(obj: EventEnvelope): void { + const s = obj.seq as string | number; + if (typeof s === "string") { + const trimmed = s.trim(); + if (!/^\d+$/.test(trimmed)) { + throw new Error("Invalid seq string"); + } + (obj as { seq: string }).seq = trimmed; + return; + } + if (typeof s === "number") { + if (!Number.isFinite(s) || !Number.isInteger(s) || s < 0) { + throw new Error("Invalid seq number"); + } + (obj as { seq: string }).seq = String(s); + return; + } + throw new Error("Missing/invalid seq"); +} + +export function applyDeltaOps( + message: ChatMessage, + ops: DeltaOp[], +): ChatMessage { + if (ops.length === 0) return message; + + const updated = { ...message } as ChatMessage & { + content?: string; + reasoning_content?: string; + tool_calls?: unknown[]; + thinking_blocks?: unknown[]; + citations?: unknown[]; + server_content_blocks?: unknown[]; + usage?: unknown; + extra?: Record<string, unknown>; + }; + + // Two-pass: accumulate all chunks first, apply once — avoids O(n²) + // string concatenation and repeated array spreading. + const contentChunks: string[] = []; + const reasoningChunks: string[] = []; + const newCitations: unknown[] = []; + const newServerBlocks: unknown[] = []; + let lastToolCalls: unknown[] | undefined; + let lastThinkingBlocks: unknown[] | undefined; + let lastUsage: unknown; + let mergedExtra: Record<string, unknown> | undefined; + + for (const op of ops) { + switch (op.op) { + case "append_content": + contentChunks.push(op.text); + break; + case "append_reasoning": + reasoningChunks.push(op.text); + break; + case "set_tool_calls": + lastToolCalls = op.tool_calls; + break; + case "set_thinking_blocks": + lastThinkingBlocks = op.blocks; + break; + case "add_citation": + newCitations.push(op.citation); + break; + case "add_server_content_block": + newServerBlocks.push(op.block); + break; + case "set_usage": + lastUsage = op.usage; + break; + case "merge_extra": + if (mergedExtra) { + Object.assign(mergedExtra, op.extra); + } else { + mergedExtra = { ...op.extra }; + } + break; + } + } + + if (contentChunks.length > 0) { + const appended = contentChunks.join(""); + updated.content = + typeof updated.content === "string" + ? updated.content + appended + : appended; + } + + if (reasoningChunks.length > 0) { + updated.reasoning_content = + (updated.reasoning_content ?? "") + reasoningChunks.join(""); + } + + if (lastToolCalls !== undefined) { + updated.tool_calls = lastToolCalls; + } + + if (lastThinkingBlocks !== undefined) { + updated.thinking_blocks = lastThinkingBlocks; + } + + if (newCitations.length > 0) { + const existing = updated.citations ?? []; + updated.citations = existing.concat(newCitations); + } + + if (newServerBlocks.length > 0) { + const existing = updated.server_content_blocks ?? []; + updated.server_content_blocks = existing.concat(newServerBlocks); + } + + if (lastUsage !== undefined) { + updated.usage = lastUsage; + } + + if (mergedExtra) { + updated.extra = updated.extra + ? 
+      ? Object.assign({}, updated.extra, mergedExtra)
+      : mergedExtra;
+  }
+
+  return updated;
+}
diff --git a/refact-agent/gui/src/services/refact/checkpoints.ts b/refact-agent/gui/src/services/refact/checkpoints.ts
index 4d7f1ac85..b8c94e658 100644
--- a/refact-agent/gui/src/services/refact/checkpoints.ts
+++ b/refact-agent/gui/src/services/refact/checkpoints.ts
@@ -18,7 +18,6 @@ export const checkpointsApi = createApi({
       const getState = api.getState as () => RootState;
       const state = getState();
       const token = state.config.apiKey;
-      headers.set("credentials", "same-origin");
       if (token) {
         headers.set("Authorization", `Bearer ${token}`);
       }
@@ -32,13 +31,10 @@ export const checkpointsApi = createApi({
     >({
       async queryFn(args, api, _extraOptions, baseQuery) {
         const state = api.getState() as RootState;
-        const { checkpoints } = args;
-        const port = state.config.lspPort as unknown as number;
+        const { checkpoints, chat_id, chat_mode } = args;
+        const port = state.config.lspPort;
         const url = `http://127.0.0.1:${port}${PREVIEW_CHECKPOINTS}`;
 
-        const chat_id = state.chat.thread.id;
-        const mode = state.chat.thread.mode;
-
         const result = await baseQuery({
           url,
           credentials: "same-origin",
@@ -47,7 +43,7 @@ export const checkpointsApi = createApi({
           body: {
             meta: {
               chat_id,
-              chat_mode: mode ?? "EXPLORE",
+              chat_mode: chat_mode ?? "EXPLORE",
             },
             checkpoints,
           },
@@ -74,13 +70,10 @@ export const checkpointsApi = createApi({
     >({
       async queryFn(args, api, _extraOptions, baseQuery) {
         const state = api.getState() as RootState;
-        const { checkpoints } = args;
-        const port = state.config.lspPort as unknown as number;
+        const { checkpoints, chat_id, chat_mode } = args;
+        const port = state.config.lspPort;
         const url = `http://127.0.0.1:${port}${RESTORE_CHECKPOINTS}`;
 
-        const chat_id = state.chat.thread.id;
-        const mode = state.chat.thread.mode;
-
         const result = await baseQuery({
           url,
           credentials: "same-origin",
@@ -89,7 +82,7 @@ export const checkpointsApi = createApi({
           body: {
             meta: {
               chat_id,
-              chat_mode: mode ?? "EXPLORE",
+              chat_mode: chat_mode ??
"EXPLORE", }, checkpoints, }, diff --git a/refact-agent/gui/src/services/refact/commands.ts b/refact-agent/gui/src/services/refact/commands.ts index ddb9aa915..84c7a6a34 100644 --- a/refact-agent/gui/src/services/refact/commands.ts +++ b/refact-agent/gui/src/services/refact/commands.ts @@ -88,7 +88,7 @@ export const commandsApi = createApi({ queryFn: async (args, api, _opts, baseQuery) => { const { messages, meta, model } = args; const state = api.getState() as RootState; - const port = state.config.lspPort; + const port = state.config.lspPort as unknown as number; const url = `http://127.0.0.1:${port}${AT_COMMAND_PREVIEW}`; const response = await baseQuery({ url, @@ -124,15 +124,20 @@ export const commandsApi = createApi({ }; } - const files = response.data.messages.reduce< - (ChatContextFile | string)[] - >((acc, curr) => { - if (curr.role === "context_file") { - const fileData = parseOrElse<ChatContextFile[]>(curr.content, []); - return [...acc, ...fileData]; + const files: (ChatContextFile | string)[] = []; + for (const msg of response.data.messages) { + if (msg.role === "context_file") { + const content = msg.content; + if (Array.isArray(content)) { + files.push(...content); + } else { + const fileData = parseOrElse<ChatContextFile[]>(content, []); + files.push(...fileData); + } + } else { + files.push(msg.content); } - return [...acc, curr.content]; - }, []); + } return { data: { ...response.data, files } }; }, @@ -181,19 +186,22 @@ export function isDetailMessageWithErrorType( return true; } -export type CommandPreviewContent = { - content: string; - role: "context_file" | "plain_text"; -}; +export type CommandPreviewContent = + | { role: "plain_text"; content: string } + | { role: "context_file"; content: ChatContextFile[] | string }; function isCommandPreviewContent(json: unknown): json is CommandPreviewContent { - if (!json) return false; - if (typeof json !== "object") return false; - if (!("content" in json)) return false; - if (typeof json.content !== "string") return false; - if (!("role" in json)) return false; - if (json.role === "context_file") return true; - if (json.role === "plain_text") return true; + if (!json || typeof json !== "object") return false; + if (!("role" in json) || !("content" in json)) return false; + + const obj = json as { role: unknown; content: unknown }; + + if (obj.role === "plain_text") { + return typeof obj.content === "string"; + } + if (obj.role === "context_file") { + return Array.isArray(obj.content) || typeof obj.content === "string"; + } return false; } @@ -223,5 +231,5 @@ export function isCommandPreviewResponse( if (!json.messages.length) return true; - return json.messages.some(isCommandPreviewContent); + return json.messages.every(isCommandPreviewContent); } diff --git a/refact-agent/gui/src/services/refact/consts.ts b/refact-agent/gui/src/services/refact/consts.ts index 7d0553a34..5ec73999b 100644 --- a/refact-agent/gui/src/services/refact/consts.ts +++ b/refact-agent/gui/src/services/refact/consts.ts @@ -1,4 +1,3 @@ -export const CHAT_URL = `/v1/chat`; export const CAPS_URL = `/v1/caps`; export const STATISTIC_URL = `/v1/get-dashboard-plots`; export const AT_COMMAND_COMPLETION = "/v1/at-command-completion"; @@ -35,12 +34,25 @@ export const RESTORE_CHECKPOINTS = "/v1/checkpoints-restore"; export const TELEMETRY_CHAT_PATH = "/v1/telemetry-chat"; export const TELEMETRY_NET_PATH = "/v1/telemetry-network"; -export const KNOWLEDGE_CREATE_URL = "/v1/trajectory-save"; export const COMPRESS_MESSAGES_URL = "/v1/trajectory-compress"; export 
const SET_ACTIVE_GROUP_ID = "/v1/set-active-group-id"; -// Providers & Models +export const TRAJECTORY_TRANSFORM_PREVIEW_URL = + "/v1/chats/{chat_id}/trajectory/transform/preview"; +export const TRAJECTORY_TRANSFORM_APPLY_URL = + "/v1/chats/{chat_id}/trajectory/transform/apply"; +export const TRAJECTORY_HANDOFF_PREVIEW_URL = + "/v1/chats/{chat_id}/trajectory/handoff/preview"; +export const TRAJECTORY_HANDOFF_APPLY_URL = + "/v1/chats/{chat_id}/trajectory/handoff/apply"; +export const TRAJECTORY_MODE_TRANSITION_APPLY_URL = + "/v1/chats/{chat_id}/trajectory/mode-transition/apply"; + +// Providers & Models (new provider system) +export const PROVIDERS_URL = "/v1/providers"; +export const PROVIDER_DEFAULTS_URL = "/v1/defaults"; +// Legacy - kept for backward compatibility export const CONFIGURED_PROVIDERS_URL = "/v1/providers"; export const PROVIDER_TEMPLATES_URL = "/v1/provider-templates"; export const PROVIDER_URL = "/v1/provider"; diff --git a/refact-agent/gui/src/services/refact/customization.ts b/refact-agent/gui/src/services/refact/customization.ts new file mode 100644 index 000000000..1c840b6cb --- /dev/null +++ b/refact-agent/gui/src/services/refact/customization.ts @@ -0,0 +1,231 @@ +import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; +import type { RootState } from "../../app/store"; + +export interface ConfigItem { + id: string; + kind: string; + title: string; + file_path: string; + specific: boolean; + scope: "global" | "local"; + global_path: string; + local_path: string; + global_exists: boolean; + local_exists: boolean; +} + +export interface ErrorItem { + file_path: string; + error: string; +} + +export interface RegistryResponse { + modes: ConfigItem[]; + subagents: ConfigItem[]; + toolbox_commands: ConfigItem[]; + code_lens: ConfigItem[]; + errors: ErrorItem[]; + has_project_root?: boolean; +} + +export interface ConfigDetailResponse { + config: Record<string, unknown>; + file_path: string; + raw_yaml: string; + scope: "global" | "local"; +} + +export interface SaveConfigResponse { + ok: boolean; + file_path: string; + scope: "global" | "local"; + errors: ErrorItem[]; +} + +export interface DeleteConfigResponse { + ok: boolean; + scope: "global" | "local"; + errors: ErrorItem[]; +} + +export type ConfigKind = + | "modes" + | "subagents" + | "toolbox_commands" + | "code_lens"; + +export const customizationApi = createApi({ + reducerPath: "customizationApi", + tagTypes: ["Registry", "Config"], + baseQuery: fetchBaseQuery({ + baseUrl: "/", + prepareHeaders: (headers, { getState }) => { + const state = getState() as RootState; + const token = state.config.apiKey; + if (token) { + headers.set("Authorization", `Bearer ${token}`); + } + return headers; + }, + }), + endpoints: (builder) => ({ + getRegistry: builder.query<RegistryResponse, undefined>({ + queryFn: async (_arg, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/customization/registry`, + }); + if (result.error) { + return { + error: { + status: result.error.status as number, + data: String(result.error.data), + }, + }; + } + return { data: result.data as RegistryResponse }; + }, + providesTags: ["Registry"], + }), + + getConfig: builder.query< + ConfigDetailResponse, + { kind: ConfigKind; id: string; scope?: "global" | "local" } + >({ + queryFn: async ({ kind, id, scope }, api, 
_opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + const scopeParam = scope ? `?scope=${scope}` : ""; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/customization/${kind}/${id}${scopeParam}`, + }); + if (result.error) { + return { + error: { + status: result.error.status as number, + data: String(result.error.data), + }, + }; + } + return { data: result.data as ConfigDetailResponse }; + }, + providesTags: (_result, _error, { kind, id }) => [ + { type: "Config", id: `${kind}/${id}` }, + ], + }), + + saveConfig: builder.mutation< + SaveConfigResponse, + { + kind: ConfigKind; + id: string; + config: Record<string, unknown>; + scope?: "global" | "local"; + } + >({ + queryFn: async ({ kind, id, config, scope }, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/customization/${kind}/${id}`, + method: "PUT", + body: { config, scope }, + }); + if (result.error) { + return { + error: { + status: result.error.status as number, + data: String(result.error.data), + }, + }; + } + return { data: result.data as SaveConfigResponse }; + }, + invalidatesTags: (_result, _error, { kind, id }) => [ + "Registry", + { type: "Config", id: `${kind}/${id}` }, + ], + }), + + createConfig: builder.mutation< + SaveConfigResponse, + { + kind: ConfigKind; + id: string; + config: Record<string, unknown>; + scope?: "global" | "local"; + } + >({ + queryFn: async ({ kind, id, config, scope }, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/customization/${kind}`, + method: "POST", + body: { id, config, scope }, + }); + if (result.error) { + return { + error: { + status: result.error.status as number, + data: String(result.error.data), + }, + }; + } + return { data: result.data as SaveConfigResponse }; + }, + invalidatesTags: ["Registry"], + }), + + deleteConfig: builder.mutation< + DeleteConfigResponse, + { kind: ConfigKind; id: string; scope: "global" | "local" } + >({ + queryFn: async ({ kind, id, scope }, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/customization/${kind}/${id}?scope=${scope}`, + method: "DELETE", + }); + if (result.error) { + return { + error: { + status: result.error.status as number, + data: String(result.error.data), + }, + }; + } + return { data: result.data as DeleteConfigResponse }; + }, + invalidatesTags: (_result, _error, { kind, id }) => [ + "Registry", + { type: "Config", id: `${kind}/${id}` }, + ], + }), + }), +}); + +export const { + useGetRegistryQuery, + useGetConfigQuery, + useSaveConfigMutation, + useCreateConfigMutation, + useDeleteConfigMutation, +} = customizationApi; diff --git a/refact-agent/gui/src/services/refact/index.ts b/refact-agent/gui/src/services/refact/index.ts index 9047e19d1..e2663f6ea 100644 --- a/refact-agent/gui/src/services/refact/index.ts 
+++ b/refact-agent/gui/src/services/refact/index.ts @@ -15,4 +15,11 @@ export * from "./integrations"; export * from "./docker"; export * from "./telemetry"; export * from "./knowledge"; +export * from "./knowledgeGraphApi"; export * from "./teams"; +export * from "./trajectories"; +export * from "./trajectory"; +export * from "./chatSubscription"; +export * from "./chatCommands"; +export * from "./tasks"; +export * from "./sidebarSubscription"; diff --git a/refact-agent/gui/src/services/refact/knowledge.ts b/refact-agent/gui/src/services/refact/knowledge.ts index 169be2988..8f7d3833a 100644 --- a/refact-agent/gui/src/services/refact/knowledge.ts +++ b/refact-agent/gui/src/services/refact/knowledge.ts @@ -1,7 +1,7 @@ import { RootState } from "../../app/store"; import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; import { formatMessagesForLsp } from "../../features/Chat/Thread/utils"; -import { COMPRESS_MESSAGES_URL, KNOWLEDGE_CREATE_URL } from "./consts"; +import { COMPRESS_MESSAGES_URL } from "./consts"; import { type ChatMessages } from "."; export type SubscribeArgs = @@ -68,21 +68,6 @@ export type CompressTrajectoryPost = { messages: ChatMessages; }; -export type SaveTrajectoryResponse = { - memid: string; - trajectory: string; -}; - -function isSaveTrajectoryResponse(obj: unknown): obj is SaveTrajectoryResponse { - if (!obj) return false; - if (typeof obj !== "object") return false; - if (!("memid" in obj) || typeof obj.memid !== "string") return false; - if (!("trajectory" in obj) || typeof obj.trajectory !== "string") { - return false; - } - return true; -} - export const knowledgeApi = createApi({ reducerPath: "knowledgeApi", baseQuery: fetchBaseQuery({ @@ -95,41 +80,6 @@ export const knowledgeApi = createApi({ }, }), endpoints: (builder) => ({ - createNewMemoryFromMessages: builder.mutation< - SaveTrajectoryResponse, - CompressTrajectoryPost - >({ - async queryFn(arg, api, extraOptions, baseQuery) { - const messagesForLsp = formatMessagesForLsp(arg.messages); - - const state = api.getState() as RootState; - const port = state.config.lspPort as unknown as number; - const url = `http://127.0.0.1:${port}${KNOWLEDGE_CREATE_URL}`; - const response = await baseQuery({ - ...extraOptions, - url, - method: "POST", - body: { project: arg.project, messages: messagesForLsp }, - }); - - if (response.error) { - return { error: response.error }; - } - - if (!isSaveTrajectoryResponse(response.data)) { - return { - error: { - status: "CUSTOM_ERROR", - error: `Invalid response from ${url}`, - data: response.data, - }, - }; - } - - return { data: response.data }; - }, - }), - compressMessages: builder.mutation< { goal: string; trajectory: string }, CompressTrajectoryPost diff --git a/refact-agent/gui/src/services/refact/knowledgeGraphApi.ts b/refact-agent/gui/src/services/refact/knowledgeGraphApi.ts new file mode 100644 index 000000000..9f7aa0cb9 --- /dev/null +++ b/refact-agent/gui/src/services/refact/knowledgeGraphApi.ts @@ -0,0 +1,113 @@ +import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; +import { RootState } from "../../app/store"; +import type { KnowledgeGraphResponse, SuccessResponse } from "./types"; + +export const knowledgeGraphApi = createApi({ + reducerPath: "knowledgeGraphApi", + baseQuery: fetchBaseQuery({ + prepareHeaders: (headers, { getState }) => { + const token = (getState() as RootState).config.apiKey; + if (token) { + headers.set("Authorization", `Bearer ${token}`); + } + return headers; + }, + }), + tagTypes: ["KnowledgeGraph", 
"Memory"], + endpoints: (builder) => ({ + getKnowledgeGraph: builder.query< + KnowledgeGraphResponse, + { includeContent?: boolean } | undefined + >({ + async queryFn(arg, api, _extraOptions, baseQuery) { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const includeContent = arg?.includeContent ?? false; + const url = `http://127.0.0.1:${port}/v1/knowledge-graph?include_content=${ + includeContent ? 1 : 0 + }`; + + const response = await baseQuery({ url }); + + if (response.error) { + return { error: response.error }; + } + + return { data: response.data as KnowledgeGraphResponse }; + }, + providesTags: ["KnowledgeGraph"], + }), + + updateMemory: builder.mutation< + SuccessResponse, + { + file_path: string; + title?: string; + content: string; + tags: string[]; + kind: string; + filenames: string[]; + } + >({ + async queryFn(arg, api, _extraOptions, baseQuery) { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}/v1/knowledge/update-memory`; + + const response = await baseQuery({ + url, + method: "POST", + body: arg, + }); + + if (response.error) { + return { error: response.error }; + } + + return { data: response.data as SuccessResponse }; + }, + invalidatesTags: ["KnowledgeGraph", "Memory"], + async onQueryStarted(_arg, { dispatch, queryFulfilled }) { + try { + await queryFulfilled; + dispatch(knowledgeGraphApi.util.invalidateTags(["KnowledgeGraph"])); + } catch { + // Error is handled by RTK Query + } + }, + }), + + deleteMemory: builder.mutation< + SuccessResponse, + { + file_path: string; + archive?: boolean; + } + >({ + async queryFn(arg, api, _extraOptions, baseQuery) { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}/v1/knowledge/delete-memory`; + + const response = await baseQuery({ + url, + method: "POST", + body: arg, + }); + + if (response.error) { + return { error: response.error }; + } + + return { data: response.data as SuccessResponse }; + }, + invalidatesTags: ["KnowledgeGraph"], + }), + }), +}); + +export const { + useGetKnowledgeGraphQuery, + useUpdateMemoryMutation, + useDeleteMemoryMutation, +} = knowledgeGraphApi; diff --git a/refact-agent/gui/src/services/refact/models.ts b/refact-agent/gui/src/services/refact/models.ts index 91262dd3a..139afe12e 100644 --- a/refact-agent/gui/src/services/refact/models.ts +++ b/refact-agent/gui/src/services/refact/models.ts @@ -245,7 +245,7 @@ export type SimplifiedModel = { export type ModelsResponse = { completion_models: SimplifiedModel[]; chat_models: SimplifiedModel[]; - embedding_model: SimplifiedModel; + embedding_model: SimplifiedModel | null; }; export type ModelType = "embedding" | "completion" | "chat"; @@ -272,8 +272,6 @@ export type DeleteModelRequestBody = Omit<UpdateModelRequestBody, "model"> & { model: string; }; -export type SupportsReasoningStyle = "openai" | "anthropic" | "deepseek" | null; - export type CodeChatModel = { n_ctx: number; name: string; @@ -284,9 +282,13 @@ export type CodeChatModel = { supports_multimodality: boolean; supports_clicks: boolean; supports_agent: boolean; - supports_reasoning: SupportsReasoningStyle; - supports_boost_reasoning: boolean; + reasoning_effort_options?: string[] | null; + supports_thinking_budget?: boolean; + supports_adaptive_thinking_budget?: boolean; default_temperature: number | null; + default_frequency_penalty?: number | null; + 
default_max_tokens?: number | null; + max_output_tokens?: number | null; enabled: boolean; @@ -352,14 +354,6 @@ export function isCodeChatModel(data: unknown): data is CodeChatModel { if (!("supports_agent" in data) || typeof data.supports_agent !== "boolean") return false; - if (!("supports_reasoning" in data)) return false; - - if ( - !("supports_boost_reasoning" in data) || - typeof data.supports_boost_reasoning !== "boolean" - ) - return false; - if (!("default_temperature" in data)) return false; if ( data.default_temperature !== null && diff --git a/refact-agent/gui/src/services/refact/projectInformation.ts b/refact-agent/gui/src/services/refact/projectInformation.ts new file mode 100644 index 000000000..7f0430bfe --- /dev/null +++ b/refact-agent/gui/src/services/refact/projectInformation.ts @@ -0,0 +1,163 @@ +import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; +import { RootState } from "../../app/store"; + +export type FileOverride = { + enabled?: boolean; + max_chars?: number; +}; + +export type SectionConfig = { + enabled: boolean; + max_chars?: number; + max_items?: number; + max_chars_per_item?: number; + max_depth?: number; + overrides?: Record<string, FileOverride>; +}; + +export type ProjectInformationConfig = { + schema_version: number; + enabled: boolean; + defaults: { + max_chars_per_item: number; + max_items_per_section: number; + }; + sections: { + system_info: SectionConfig; + environment_instructions: SectionConfig; + detected_environments: SectionConfig; + git_info: SectionConfig; + project_tree: SectionConfig; + instruction_files: SectionConfig; + project_configs: SectionConfig; + memories: SectionConfig; + }; +}; + +export type ProjectInfoBlock = { + id: string; + section: string; + title: string; + path: string | null; + content: string; + truncated: boolean; + enabled: boolean; + char_count: number; + original_char_count?: number; +}; + +export type ProjectInformationPreviewResponse = { + blocks: ProjectInfoBlock[]; + warnings: string[]; +}; + +const DEFAULT_CONFIG: ProjectInformationConfig = { + schema_version: 1, + enabled: true, + defaults: { + max_chars_per_item: 8000, + max_items_per_section: 50, + }, + sections: { + system_info: { enabled: true }, + environment_instructions: { enabled: true, max_chars: 6000 }, + detected_environments: { enabled: true, max_items: 50 }, + git_info: { enabled: true, max_chars: 6000 }, + project_tree: { enabled: true, max_depth: 4, max_chars: 16000 }, + instruction_files: { + enabled: true, + max_items: 20, + max_chars_per_item: 8000, + }, + project_configs: { enabled: true, max_items: 30, max_chars_per_item: 4000 }, + memories: { enabled: true, max_items: 30, max_chars_per_item: 2000 }, + }, +}; + +export { DEFAULT_CONFIG as defaultProjectInformationConfig }; + +export const projectInformationApi = createApi({ + reducerPath: "projectInformationApi", + baseQuery: fetchBaseQuery({ + prepareHeaders: (headers, { getState }) => { + const state = getState() as RootState; + const apiKey = state.config.apiKey; + if (apiKey) { + headers.set("Authorization", `Bearer ${apiKey}`); + } + return headers; + }, + }), + tagTypes: ["ProjectInformation"], + endpoints: (builder) => ({ + getProjectInformation: builder.query<ProjectInformationConfig, undefined>({ + queryFn: async (_args, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + const result = await baseQuery({ + url: 
`http://127.0.0.1:${port}/v1/project-information`, + }); + if (result.error) { + return { error: result.error }; + } + return { + data: + (result.data as ProjectInformationConfig | null) ?? DEFAULT_CONFIG, + }; + }, + providesTags: ["ProjectInformation"], + }), + saveProjectInformation: builder.mutation< + undefined, + ProjectInformationConfig + >({ + queryFn: async (config, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/project-information`, + method: "POST", + body: config, + }); + if (result.error) { + return { error: result.error }; + } + return { data: undefined }; + }, + invalidatesTags: ["ProjectInformation"], + }), + getProjectInformationPreview: builder.mutation< + ProjectInformationPreviewResponse, + ProjectInformationConfig + >({ + queryFn: async (config, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + if (!port) { + return { error: { status: 500, data: "Missing lspPort in config" } }; + } + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/project-information/preview`, + method: "POST", + body: config, + }); + if (result.error) { + return { error: result.error }; + } + return { data: result.data as ProjectInformationPreviewResponse }; + }, + }), + }), +}); + +export const { + useGetProjectInformationQuery, + useSaveProjectInformationMutation, + useGetProjectInformationPreviewMutation, +} = projectInformationApi; diff --git a/refact-agent/gui/src/services/refact/providers.ts b/refact-agent/gui/src/services/refact/providers.ts index eb9592072..5a8cc50c8 100644 --- a/refact-agent/gui/src/services/refact/providers.ts +++ b/refact-agent/gui/src/services/refact/providers.ts @@ -1,20 +1,220 @@ import { RootState } from "../../app/store"; import { hasProperty } from "../../utils"; import { isDetailMessage } from "./commands"; -import { - CONFIGURED_PROVIDERS_URL, - PROVIDER_TEMPLATES_URL, - PROVIDER_URL, -} from "./consts"; +import { PROVIDERS_URL, PROVIDER_DEFAULTS_URL } from "./consts"; import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; +export type WireFormat = + | "openai_chat_completions" + | "openai_responses" + | "anthropic_messages" + | "refact"; + +export type ProviderModel = { + id: string; + base_name: string; + enabled: boolean; + n_ctx: number; + supports_tools: boolean; + supports_multimodality: boolean; + reasoning_effort_options?: string[] | null; + supports_thinking_budget?: boolean; + supports_adaptive_thinking_budget?: boolean; + supports_agent: boolean; + wire_format_override: WireFormat | null; + endpoint_override: string | null; + user_configured: boolean; + removable: boolean; +}; + +export type ProviderRuntime = { + name: string; + display_name: string; + enabled: boolean; + readonly: boolean; + wire_format: WireFormat; + chat_endpoint: string; + completion_endpoint: string; + embedding_endpoint: string; + support_metadata: boolean; + chat_models: ProviderModel[]; + completion_models: ProviderModel[]; + embedding_model: ProviderModel | null; +}; + +export type ProviderStatus = "not_configured" | "configured" | "active"; + +export type ProviderListItem = { + name: string; + display_name: string; + enabled: boolean; + readonly: boolean; + has_credentials: boolean; + status: ProviderStatus; + model_count: number; +}; + +export type 
ProviderListResponse = { + providers: ProviderListItem[]; +}; + +export type ProviderDetailResponse = { + name: string; + display_name: string; + enabled: boolean; + readonly: boolean; + has_credentials: boolean; + selected_models_count: number; + status: ProviderStatus; + settings: Record<string, unknown>; + runtime: ProviderRuntime | null; +}; + +export type ProviderSchemaResponse = { + name: string; + schema: string; +}; + +export type ProviderModelsResponse = { + models: ProviderModel[]; +}; + +// Available models from model discovery (lazy loaded) +export type AvailableModel = { + id: string; + display_name: string | null; + n_ctx: number; + supports_tools: boolean; + supports_multimodality: boolean; + reasoning_effort_options?: string[] | null; + supports_thinking_budget?: boolean; + supports_adaptive_thinking_budget?: boolean; + tokenizer: string | null; + enabled: boolean; + is_custom: boolean; + pricing?: { + prompt: number; + generated: number; + cache_read?: number; + cache_creation?: number; + } | null; + available_providers?: string[]; + selected_provider?: string | null; + max_output_tokens?: number | null; + provider_variants?: { + id: string; + name?: string | null; + tag?: string | null; + context_length?: number | null; + max_output_tokens?: number | null; + pricing?: { + prompt: number; + generated: number; + cache_read?: number; + cache_creation?: number; + } | null; + latency_last_30m?: number | null; + throughput_last_30m?: number | null; + uptime_last_30m?: number | null; + supported_parameters?: string[] | null; + }[]; +}; + +export type AvailableModelsResponse = { + models: AvailableModel[]; + source: "model_caps" | "api" | "local" | "manual"; + error?: string | null; +}; + +export type OpenRouterAccountInfoResponse = { + data: { + key_name?: string | null; + key_label?: string | null; + limit?: number | null; + usage?: number | null; + remaining?: number | null; + is_free_tier?: boolean | null; + rate_limit?: unknown; + }; +}; + +export type OpenRouterHealthResponse = { + ok: boolean; + message?: string | null; + data?: { + key_name?: string | null; + key_label?: string | null; + rate_limit?: unknown; + } | null; +}; + +export type OpenRouterModelEndpointsResponse = { + provider_variants: NonNullable<AvailableModel["provider_variants"]>; + available_providers: string[]; +}; + +export type ModelToggleRequest = { + model_id: string; + enabled: boolean; +}; + +export type ModelProviderRequest = { + model_id: string; + selected_provider?: string | null; +}; + +export type CustomModelConfig = { + n_ctx: number; + supports_tools?: boolean; + supports_multimodality?: boolean; + reasoning_effort_options?: string[] | null; + supports_thinking_budget?: boolean; + supports_adaptive_thinking_budget?: boolean; + tokenizer?: string | null; +}; + +export type AddCustomModelRequest = { + id: string; +} & CustomModelConfig; + +export type ModelTypeDefaults = { + model?: string; + max_new_tokens?: number; + temperature?: number; + top_p?: number; + boost_reasoning?: boolean; + reasoning_effort?: string; + thinking_budget?: number; +}; + +export type ProviderDefaults = { + chat: ModelTypeDefaults; + chat_light: ModelTypeDefaults; + chat_thinking: ModelTypeDefaults; + completion_model?: string; + embedding_model?: string; +}; + +export type ErrorLogInstance = { + path: string; + error_line: number; + error_msg: string; +}; + +export type ConfiguredProvidersResponse = { + providers: ProviderListItem[]; + error_log?: ErrorLogInstance[]; +}; + export const providersApi = createApi({ 
reducerPath: "providers", tagTypes: [ "PROVIDERS", - "TEMPLATE_PROVIDERS", - "CONFIGURED_PROVIDERS", "PROVIDER", + "PROVIDER_SCHEMA", + "PROVIDER_MODELS", + "AVAILABLE_MODELS", + "DEFAULTS", ], baseQuery: fetchBaseQuery({ prepareHeaders: (headers, { getState }) => { @@ -33,7 +233,7 @@ export const providersApi = createApi({ queryFn: async (_args, api, extraOptions, baseQuery) => { const state = api.getState() as RootState; const port = state.config.lspPort as unknown as number; - const url = `http://127.0.0.1:${port}${CONFIGURED_PROVIDERS_URL}`; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}`; const result = await baseQuery({ ...extraOptions, @@ -45,7 +245,7 @@ export const providersApi = createApi({ if (result.error) { return { error: result.error }; } - if (!isConfiguredProvidersResponse(result.data)) { + if (!isProviderListResponse(result.data)) { return { meta: result.meta, error: { @@ -56,16 +256,61 @@ export const providersApi = createApi({ }; } + return { data: { providers: result.data.providers, error_log: [] } }; + }, + providesTags: [{ type: "PROVIDERS", id: "LIST" }], + }), + + getProvider: builder.query< + ProviderDetailResponse, + { providerName: string } + >({ + providesTags: (_result, _error, { providerName }) => [ + { type: "PROVIDER", id: providerName }, + ], + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}`; + + const result = await baseQuery({ + ...extraOptions, + method: "GET", + url, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + if (!isProviderDetailResponse(result.data)) { + return { + meta: result.meta, + error: { + error: `Invalid response from /v1/providers/${args.providerName}`, + data: result.data, + status: "CUSTOM_ERROR", + }, + }; + } + return { data: result.data }; }, - providesTags: [{ type: "CONFIGURED_PROVIDERS", id: "LIST" }], }), - getProviderTemplates: builder.query<ProviderTemplatesResponse, undefined>({ - providesTags: ["TEMPLATE_PROVIDERS"], - queryFn: async (_args, api, extraOptions, baseQuery) => { + + getProviderSchema: builder.query< + ProviderSchemaResponse, + { providerName: string } + >({ + providesTags: (_result, _error, { providerName }) => [ + { type: "PROVIDER_SCHEMA", id: providerName }, + ], + queryFn: async (args, api, extraOptions, baseQuery) => { const state = api.getState() as RootState; const port = state.config.lspPort as unknown as number; - const url = `http://127.0.0.1:${port}${PROVIDER_TEMPLATES_URL}`; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/schema`; const result = await baseQuery({ ...extraOptions, @@ -74,14 +319,16 @@ export const providersApi = createApi({ credentials: "same-origin", redirect: "follow", }); + if (result.error) { return { error: result.error }; } - if (!isProviderTemplatesResponse(result.data)) { + + if (!isProviderSchemaResponse(result.data)) { return { meta: result.meta, error: { - error: "Invalid response from /v1/provider-templates", + error: `Invalid response from /v1/providers/${args.providerName}/schema`, data: result.data, status: "CUSTOM_ERROR", }, @@ -91,20 +338,23 @@ export const providersApi = createApi({ return { data: result.data }; }, }), - getProvider: builder.query<Provider, { providerName: string }>({ - providesTags: ["PROVIDER"], + + getProviderModels: builder.query< + 
ProviderModelsResponse, + { providerName: string } + >({ + providesTags: (_result, _error, { providerName }) => [ + { type: "PROVIDER_MODELS", id: providerName }, + ], queryFn: async (args, api, extraOptions, baseQuery) => { const state = api.getState() as RootState; const port = state.config.lspPort as unknown as number; - const url = `http://127.0.0.1:${port}${PROVIDER_URL}`; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/models`; const result = await baseQuery({ ...extraOptions, method: "GET", url, - params: { - "provider-name": args.providerName, - }, credentials: "same-origin", redirect: "follow", }); @@ -113,11 +363,11 @@ export const providersApi = createApi({ return { error: result.error }; } - if (!isProvider(result.data)) { + if (!isProviderModelsResponse(result.data)) { return { meta: result.meta, error: { - error: "Invalid response from /v1/provider", + error: `Invalid response from /v1/providers/${args.providerName}/models`, data: result.data, status: "CUSTOM_ERROR", }, @@ -127,31 +377,37 @@ export const providersApi = createApi({ return { data: result.data }; }, }), - updateProvider: builder.mutation<unknown, Provider>({ - invalidatesTags: (_result, _error, args) => [ - { type: "PROVIDER", id: args.name }, + + // Get all available models for a provider (discovered + custom) + getAvailableModels: builder.query< + AvailableModelsResponse, + { providerName: string } + >({ + providesTags: (_result, _error, { providerName }) => [ + { type: "AVAILABLE_MODELS", id: providerName }, ], queryFn: async (args, api, extraOptions, baseQuery) => { const state = api.getState() as RootState; const port = state.config.lspPort as unknown as number; - const url = `http://127.0.0.1:${port}${PROVIDER_URL}`; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/available-models`; const result = await baseQuery({ ...extraOptions, - method: "POST", + method: "GET", url, - body: { ...args }, credentials: "same-origin", redirect: "follow", }); + if (result.error) { return { error: result.error }; } - if (isDetailMessage(result.data)) { + + if (!isAvailableModelsResponse(result.data)) { return { meta: result.meta, error: { - error: "Invalid response from /v1/provider", + error: `Invalid response from /v1/providers/${args.providerName}/available-models`, data: result.data, status: "CUSTOM_ERROR", }, @@ -161,22 +417,299 @@ export const providersApi = createApi({ return { data: result.data }; }, }), - deleteProvider: builder.mutation<unknown, string>({ - invalidatesTags: (_result, _error, args) => [ - { type: "PROVIDER", id: args }, + + getOpenRouterModelEndpoints: builder.query< + OpenRouterModelEndpointsResponse, + { providerName: string; modelId: string } + >({ + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${ + args.providerName + }/models/${encodeURIComponent(args.modelId)}/endpoints`; + + const result = await baseQuery({ + ...extraOptions, + method: "GET", + url, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + if (!isOpenRouterModelEndpointsResponse(result.data)) { + return { + meta: result.meta, + error: { + error: `Invalid response from /v1/providers/${args.providerName}/models/${args.modelId}/endpoints`, + data: result.data, + status: "CUSTOM_ERROR", + }, + }; + } + + return { data: result.data }; + }, 
+ }), + + getOpenRouterAccountInfo: builder.query< + OpenRouterAccountInfoResponse, + undefined + >({ + queryFn: async (_args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}/v1/openrouter/account-info`; + + const result = await baseQuery({ + ...extraOptions, + method: "GET", + url, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + return { data: result.data as OpenRouterAccountInfoResponse }; + }, + }), + + getOpenRouterHealth: builder.query<OpenRouterHealthResponse, undefined>({ + queryFn: async (_args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}/v1/openrouter/health`; + + const result = await baseQuery({ + ...extraOptions, + method: "GET", + url, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + return { data: result.data as OpenRouterHealthResponse }; + }, + }), + + // Toggle model enabled/disabled + toggleModel: builder.mutation< + { success: boolean; model_id: string; enabled: boolean }, + { providerName: string; modelId: string; enabled: boolean } + >({ + invalidatesTags: (_result, _error, { providerName }) => [ + { type: "AVAILABLE_MODELS", id: providerName }, + { type: "PROVIDER", id: providerName }, ], queryFn: async (args, api, extraOptions, baseQuery) => { const state = api.getState() as RootState; const port = state.config.lspPort as unknown as number; - const url = `http://127.0.0.1:${port}${PROVIDER_URL}`; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/models/toggle`; const result = await baseQuery({ ...extraOptions, - method: "DELETE", + method: "POST", + url, + body: { model_id: args.modelId, enabled: args.enabled }, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + const data = result.data as + | { success?: boolean; detail?: string } + | undefined; + if (data?.success === false) { + return { + meta: result.meta, + error: { + error: data.detail ?? "Failed to toggle model", + data: result.data, + status: "CUSTOM_ERROR", + }, + }; + } + + return { + data: { + success: true, + model_id: args.modelId, + enabled: args.enabled, + }, + }; + }, + }), + + setModelProvider: builder.mutation< + { + success: boolean; + model_id: string; + selected_provider?: string | null; + }, + { + providerName: string; + modelId: string; + selectedProvider?: string | null; + } + >({ + invalidatesTags: (_result, _error, { providerName }) => [ + { type: "AVAILABLE_MODELS", id: providerName }, + { type: "PROVIDER", id: providerName }, + ], + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/models/provider`; + + const result = await baseQuery({ + ...extraOptions, + method: "POST", url, - params: { - "provider-name": args, + body: { + model_id: args.modelId, + selected_provider: args.selectedProvider ?? 
null, + }, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + return { + data: { + success: true, + model_id: args.modelId, + selected_provider: args.selectedProvider ?? null, }, + }; + }, + }), + + // Add custom model + addCustomModel: builder.mutation< + { success: boolean; model_id: string }, + { providerName: string; model: AddCustomModelRequest } + >({ + invalidatesTags: (_result, _error, { providerName }) => [ + { type: "AVAILABLE_MODELS", id: providerName }, + { type: "PROVIDER", id: providerName }, + ], + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/custom-models`; + + const result = await baseQuery({ + ...extraOptions, + method: "POST", + url, + body: args.model, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + const data = result.data as + | { success?: boolean; detail?: string } + | undefined; + if (data?.success === false) { + return { + meta: result.meta, + error: { + error: data.detail ?? "Failed to add custom model", + data: result.data, + status: "CUSTOM_ERROR", + }, + }; + } + + return { data: { success: true, model_id: args.model.id } }; + }, + }), + + // Remove custom model + removeCustomModel: builder.mutation< + { success: boolean; model_id: string }, + { providerName: string; modelId: string } + >({ + invalidatesTags: (_result, _error, { providerName }) => [ + { type: "AVAILABLE_MODELS", id: providerName }, + { type: "PROVIDER", id: providerName }, + ], + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/custom-models/remove`; + + const result = await baseQuery({ + ...extraOptions, + method: "POST", + url, + body: { model_id: args.modelId }, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + const data = result.data as + | { success?: boolean; detail?: string } + | undefined; + if (data?.success === false) { + return { + meta: result.meta, + error: { + error: data.detail ?? 
"Failed to remove custom model", + data: result.data, + status: "CUSTOM_ERROR", + }, + }; + } + + return { data: { success: true, model_id: args.modelId } }; + }, + }), + + updateProvider: builder.mutation< + { success: boolean }, + { providerName: string; settings: Record<string, unknown> } + >({ + invalidatesTags: (_result, _error, { providerName }) => [ + { type: "PROVIDER", id: providerName }, + { type: "PROVIDER_MODELS", id: providerName }, + { type: "PROVIDERS", id: "LIST" }, + ], + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}`; + + const result = await baseQuery({ + ...extraOptions, + method: "POST", + url, + body: args.settings, credentials: "same-origin", redirect: "follow", }); @@ -187,176 +720,331 @@ export const providersApi = createApi({ return { meta: result.meta, error: { - error: "Invalid response from /v1/provider", + error: `Failed to update provider ${args.providerName}`, data: result.data, status: "CUSTOM_ERROR", }, }; } - return { data: result.data }; + return { data: { success: true } }; }, }), - }), - refetchOnMountOrArgChange: true, -}); -export type Provider = { - name: string; - endpoint_style: "openai" | "hf"; - chat_endpoint: string; - completion_endpoint: string; - embedding_endpoint: string; - api_key: string; + oauthStart: builder.mutation< + { session_id: string; authorize_url: string }, + { providerName: string; mode?: string } + >({ + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/oauth/start`; - chat_default_model: string; - chat_thinking_model: string; - chat_light_model: string; + const result = await baseQuery({ + ...extraOptions, + method: "POST", + url, + body: { mode: args.mode ?? "max" }, + credentials: "same-origin", + redirect: "follow", + }); + if (result.error) { + return { error: result.error }; + } + const data = result.data as { + session_id: string; + authorize_url: string; + }; + return { data }; + }, + }), - enabled: boolean; - readonly: boolean; - supports_completion?: boolean; -}; + oauthExchange: builder.mutation< + { success: boolean; auth_status: string }, + { providerName: string; session_id: string; code: string } + >({ + invalidatesTags: (_result, _error, { providerName }) => [ + { type: "PROVIDER", id: providerName }, + { type: "PROVIDERS", id: "LIST" }, + { type: "AVAILABLE_MODELS", id: providerName }, + ], + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/oauth/exchange`; -export type SimplifiedProvider< - T extends keyof Provider | undefined = undefined, -> = [T] extends [undefined] - ? 
Partial<Provider> - : Required<Pick<Provider, T & keyof Provider>>; + const result = await baseQuery({ + ...extraOptions, + method: "POST", + url, + body: { session_id: args.session_id, code: args.code }, + credentials: "same-origin", + redirect: "follow", + }); + if (result.error) { + return { error: result.error }; + } + const data = result.data as { + success: boolean; + auth_status: string; + }; + return { data }; + }, + }), -export type ErrorLogInstance = { - path: string; - error_line: number; - error_msg: string; -}; + oauthLogout: builder.mutation< + { success: boolean }, + { providerName: string } + >({ + invalidatesTags: (_result, _error, { providerName }) => [ + { type: "PROVIDER", id: providerName }, + { type: "PROVIDERS", id: "LIST" }, + { type: "AVAILABLE_MODELS", id: providerName }, + ], + queryFn: async (args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${args.providerName}/oauth/logout`; -export type ConfiguredProvidersResponse = { - providers: SimplifiedProvider< - "name" | "enabled" | "readonly" | "supports_completion" - >[]; - error_log: ErrorLogInstance[]; -}; + const result = await baseQuery({ + ...extraOptions, + method: "POST", + url, + credentials: "same-origin", + redirect: "follow", + }); + if (result.error) { + return { error: result.error }; + } + return { data: { success: true } }; + }, + }), -export type ProviderTemplatesResponse = { - provider_templates: SimplifiedProvider<"name">[]; -}; + deleteProvider: builder.mutation<{ success: boolean }, string>({ + invalidatesTags: (_result, _error, providerName) => [ + { type: "PROVIDER", id: providerName }, + { type: "PROVIDER_MODELS", id: providerName }, + { type: "PROVIDERS", id: "LIST" }, + ], + queryFn: async (providerName, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDERS_URL}/${providerName}`; -export const providersEndpoints = providersApi.endpoints; + const result = await baseQuery({ + ...extraOptions, + method: "DELETE", + url, + credentials: "same-origin", + redirect: "follow", + }); + if (result.error) { + return { error: result.error }; + } + if (isDetailMessage(result.data)) { + return { + meta: result.meta, + error: { + error: `Failed to delete provider ${providerName}`, + data: result.data, + status: "CUSTOM_ERROR", + }, + }; + } -export function isProvider(data: unknown): data is Provider { - if (typeof data !== "object" || data === null) return false; + return { data: { success: true } }; + }, + }), - if ( - !hasProperty(data, "name") || - !hasProperty(data, "endpoint_style") || - !hasProperty(data, "chat_endpoint") || - !hasProperty(data, "completion_endpoint") || - !hasProperty(data, "embedding_endpoint") || - !hasProperty(data, "api_key") || - !hasProperty(data, "chat_default_model") || - !hasProperty(data, "chat_thinking_model") || - !hasProperty(data, "chat_light_model") || - !hasProperty(data, "enabled") - ) - return false; + getDefaults: builder.query<ProviderDefaults, undefined>({ + providesTags: ["DEFAULTS"], + queryFn: async (_args, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDER_DEFAULTS_URL}`; - if (typeof data.name !== "string") return false; - if 
(data.endpoint_style !== "openai" && data.endpoint_style !== "hf") - return false; - if (typeof data.chat_endpoint !== "string") return false; - if (typeof data.completion_endpoint !== "string") return false; - if (typeof data.embedding_endpoint !== "string") return false; - if (typeof data.api_key !== "string") return false; - if (typeof data.chat_default_model !== "string") return false; - if (typeof data.chat_thinking_model !== "string") return false; - if (typeof data.chat_light_model !== "string") return false; - if (typeof data.enabled !== "boolean") return false; + const result = await baseQuery({ + ...extraOptions, + method: "GET", + url, + credentials: "same-origin", + redirect: "follow", + }); - return true; -} + if (result.error) { + return { error: result.error }; + } -export function isConfiguredProvidersResponse( - data: unknown, -): data is ConfiguredProvidersResponse { - // Check if data is an object - if (typeof data !== "object" || data === null) return false; + if (!isProviderDefaults(result.data)) { + return { + meta: result.meta, + error: { + error: "Invalid response from /v1/defaults", + data: result.data, + status: "CUSTOM_ERROR", + }, + }; + } - if (!hasProperty(data, "providers") || !hasProperty(data, "error_log")) - return false; + return { data: result.data }; + }, + }), - if (!Array.isArray(data.providers)) return false; + updateDefaults: builder.mutation<{ success: boolean }, ProviderDefaults>({ + invalidatesTags: ["DEFAULTS"], + queryFn: async (defaults, api, extraOptions, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort as unknown as number; + const url = `http://127.0.0.1:${port}${PROVIDER_DEFAULTS_URL}`; + + const result = await baseQuery({ + ...extraOptions, + method: "POST", + url, + body: defaults, + credentials: "same-origin", + redirect: "follow", + }); + + if (result.error) { + return { error: result.error }; + } + + return { data: { success: true } }; + }, + }), + }), + refetchOnMountOrArgChange: true, +}); - if (!Array.isArray(data.error_log)) return false; +function isProviderListResponse(data: unknown): data is ProviderListResponse { + if (typeof data !== "object" || data === null) return false; + if (!hasProperty(data, "providers")) return false; + if (!Array.isArray(data.providers)) return false; for (const provider of data.providers) { - if (!isSimplifiedProvider(provider)) return false; + if (!isProviderListItem(provider)) return false; } - for (const errorLog of data.error_log) { - if (!isErrorLogInstance(errorLog)) return false; - } + return true; +} +function isProviderListItem(data: unknown): data is ProviderListItem { + if (typeof data !== "object" || data === null) return false; + if (!hasProperty(data, "name") || typeof data.name !== "string") return false; + if ( + !hasProperty(data, "display_name") || + typeof data.display_name !== "string" + ) + return false; + if (!hasProperty(data, "enabled") || typeof data.enabled !== "boolean") + return false; + if (!hasProperty(data, "readonly") || typeof data.readonly !== "boolean") + return false; + if (!hasProperty(data, "model_count") || typeof data.model_count !== "number") + return false; + // has_credentials and status are optional for backward compat return true; } -export function isProviderTemplatesResponse( +function isProviderDetailResponse( data: unknown, -): data is ProviderTemplatesResponse { +): data is ProviderDetailResponse { if (typeof data !== "object" || data === null) return false; + if (!hasProperty(data, "name") || typeof 
data.name !== "string") return false; + if ( + !hasProperty(data, "display_name") || + typeof data.display_name !== "string" + ) + return false; + if (!hasProperty(data, "enabled") || typeof data.enabled !== "boolean") + return false; + if (!hasProperty(data, "readonly") || typeof data.readonly !== "boolean") + return false; + if (!hasProperty(data, "settings")) return false; + // runtime can be null + return true; +} - if (!hasProperty(data, "provider_templates")) return false; - - if (!Array.isArray(data.provider_templates)) return false; - - for (const template of data.provider_templates) { - if (!isSimplifiedProviderWithName(template)) return false; - } - +function isProviderSchemaResponse( + data: unknown, +): data is ProviderSchemaResponse { + if (typeof data !== "object" || data === null) return false; + if (!hasProperty(data, "name") || typeof data.name !== "string") return false; + if (!hasProperty(data, "schema") || typeof data.schema !== "string") + return false; return true; } -function isSimplifiedProviderWithName( - template: unknown, -): template is SimplifiedProvider<"name"> { - if (typeof template !== "object" || template === null) return false; +function isProviderModelsResponse( + data: unknown, +): data is ProviderModelsResponse { + if (typeof data !== "object" || data === null) return false; + if (!hasProperty(data, "models")) return false; + if (!Array.isArray(data.models)) return false; + return true; +} - if (!hasProperty(template, "name")) return false; +function isAvailableModelsResponse( + data: unknown, +): data is AvailableModelsResponse { + if (typeof data !== "object" || data === null) return false; + if (!hasProperty(data, "models")) return false; + if (!Array.isArray(data.models)) return false; + if (!hasProperty(data, "source")) return false; + return true; +} - return typeof template.name === "string"; +function isOpenRouterModelEndpointsResponse( + data: unknown, +): data is OpenRouterModelEndpointsResponse { + if (typeof data !== "object" || data === null) return false; + if (!hasProperty(data, "provider_variants")) return false; + if (!hasProperty(data, "available_providers")) return false; + if (!Array.isArray(data.available_providers)) return false; + return true; } -function isSimplifiedProvider( - provider: unknown, -): provider is SimplifiedProvider<"name" | "enabled"> { - if (typeof provider !== "object" || provider === null) return false; +function isModelTypeDefaults(data: unknown): data is ModelTypeDefaults { + if (typeof data !== "object" || data === null) return false; + return true; +} - if (!hasProperty(provider, "name") || !hasProperty(provider, "enabled")) +function isProviderDefaults(data: unknown): data is ProviderDefaults { + if (typeof data !== "object" || data === null) return false; + const obj = data as Record<string, unknown>; + if (hasProperty(obj, "chat") && !isModelTypeDefaults(obj.chat)) return false; + if (hasProperty(obj, "chat_light") && !isModelTypeDefaults(obj.chat_light)) return false; - if ( - hasProperty(provider, "readonly") && - typeof provider.readonly !== "boolean" + hasProperty(obj, "chat_thinking") && + !isModelTypeDefaults(obj.chat_thinking) ) return false; - - return ( - typeof provider.name === "string" && typeof provider.enabled === "boolean" - ); + if (hasProperty(obj, "detail")) return false; + return true; } -function isErrorLogInstance(errorLog: unknown): errorLog is ErrorLogInstance { - if (typeof errorLog !== "object" || errorLog === null) return false; - - if ( - !hasProperty(errorLog, "path") || - 
!hasProperty(errorLog, "error_line") || - !hasProperty(errorLog, "error_msg") - ) - return false; +export const providersEndpoints = providersApi.endpoints; - return ( - typeof errorLog.path === "string" && - typeof errorLog.error_line === "number" && - typeof errorLog.error_msg === "string" - ); -} +export const { + useGetConfiguredProvidersQuery, + useGetProviderQuery, + useGetProviderSchemaQuery, + useGetProviderModelsQuery, + useGetAvailableModelsQuery, + useGetOpenRouterModelEndpointsQuery, + useGetOpenRouterAccountInfoQuery, + useGetOpenRouterHealthQuery, + useToggleModelMutation, + useSetModelProviderMutation, + useAddCustomModelMutation, + useRemoveCustomModelMutation, + useUpdateProviderMutation, + useDeleteProviderMutation, + useGetDefaultsQuery, + useUpdateDefaultsMutation, + useOauthStartMutation, + useOauthExchangeMutation, + useOauthLogoutMutation, +} = providersApi; diff --git a/refact-agent/gui/src/services/refact/sidebarSubscription.ts b/refact-agent/gui/src/services/refact/sidebarSubscription.ts new file mode 100644 index 000000000..5986d48a5 --- /dev/null +++ b/refact-agent/gui/src/services/refact/sidebarSubscription.ts @@ -0,0 +1,239 @@ +import type { TrajectoryMeta, TrajectoryEvent } from "./trajectories"; +import type { TaskMeta, TaskBoard } from "./tasks"; + +export type { TrajectoryMeta, TrajectoryEvent }; + +export type TaskEvent = + | { type: "snapshot"; tasks: TaskMeta[] } + | { type: "task_created"; task_id: string; meta: TaskMeta } + | { type: "task_updated"; task_id: string; meta: TaskMeta } + | { type: "task_deleted"; task_id: string } + | { type: "board_changed"; task_id: string; rev: number; board: TaskBoard }; + +export type NotificationEvent = + | { + type: "task_done"; + chat_id: string; + tool_call_id: string; + summary: string; + knowledge_path?: string; + } + | { + type: "ask_questions"; + chat_id: string; + tool_call_id: string; + questions: { + id: string; + type: string; + text: string; + options?: string[]; + }[]; + }; + +export type SidebarEvent = + | { + category: "snapshot"; + trajectories: TrajectoryMeta[]; + tasks: TaskMeta[]; + } + | ({ category: "trajectory" } & TrajectoryEvent) + | ({ category: "task" } & TaskEvent) + | ({ category: "notification" } & NotificationEvent); + +export type SidebarEventEnvelope = { + seq: number; +} & SidebarEvent; + +export type SidebarSubscriptionCallbacks = { + onEvent: (event: SidebarEventEnvelope) => void; + onError: (error: Error) => void; + onConnected?: () => void; + onDisconnected?: () => void; +}; + +function isValidSnapshot(obj: Record<string, unknown>): boolean { + return Array.isArray(obj.trajectories) && Array.isArray(obj.tasks); +} + +function isValidTrajectoryEvent(obj: Record<string, unknown>): boolean { + return typeof obj.type === "string" && typeof obj.id === "string"; +} + +function isValidTaskEvent(obj: Record<string, unknown>): boolean { + if (typeof obj.type !== "string") return false; + if (obj.type === "snapshot") return Array.isArray(obj.tasks); + if (obj.type === "task_deleted") return typeof obj.task_id === "string"; + if (obj.type === "board_changed") + return typeof obj.task_id === "string" && obj.board !== undefined; + return typeof obj.task_id === "string" && obj.meta !== undefined; +} + +function isValidNotificationEvent(obj: Record<string, unknown>): boolean { + if (typeof obj.type !== "string") return false; + if (typeof obj.chat_id !== "string") return false; + if (typeof obj.tool_call_id !== "string") return false; + + if (obj.type === "task_done") { + return typeof 
obj.summary === "string"; + } + + if (obj.type === "ask_questions") { + return Array.isArray(obj.questions); + } + + return false; +} + +function isValidSidebarEventEnvelope( + data: unknown, +): data is SidebarEventEnvelope { + if (typeof data !== "object" || data === null) return false; + const obj = data as Record<string, unknown>; + if (typeof obj.seq !== "number") return false; + if (typeof obj.category !== "string") return false; + + switch (obj.category) { + case "snapshot": + return isValidSnapshot(obj); + case "trajectory": + return isValidTrajectoryEvent(obj); + case "task": + return isValidTaskEvent(obj); + case "notification": + return isValidNotificationEvent(obj); + default: + return false; + } +} + +const IDLE_TIMEOUT_MS = 30_000; + +export function subscribeToSidebarEvents( + port: number, + apiKey: string | null, + callbacks: SidebarSubscriptionCallbacks, +): () => void { + const url = `http://127.0.0.1:${port}/v1/sidebar/subscribe`; + const abortController = new AbortController(); + const state = { connected: false, lastSeq: -1, aborted: false }; + let idleTimer: ReturnType<typeof setTimeout> | null = null; + + const headers: Record<string, string> = {}; + if (apiKey) { + headers.Authorization = `Bearer ${apiKey}`; + } + + const resetIdleTimer = () => { + if (idleTimer) clearTimeout(idleTimer); + idleTimer = setTimeout(() => { + abortController.abort(); + }, IDLE_TIMEOUT_MS); + }; + + const cleanup = () => { + if (idleTimer) { + clearTimeout(idleTimer); + idleTimer = null; + } + if (!state.aborted) { + state.aborted = true; + abortController.abort(); + } + if (state.connected) { + state.connected = false; + callbacks.onDisconnected?.(); + } + }; + + void fetch(url, { + method: "GET", + headers, + signal: abortController.signal, + }) + .then(async (response) => { + if (!response.ok) { + throw new Error(`SSE connection failed: ${response.status}`); + } + if (!response.body) { + throw new Error("Response body is null"); + } + + state.connected = true; + callbacks.onConnected?.(); + resetIdleTimer(); + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + try { + for (;;) { + const { done, value } = await reader.read(); + if (done) break; + + resetIdleTimer(); + buffer += decoder.decode(value, { stream: true }); + buffer = buffer.replace(/\r\n/g, "\n").replace(/\r/g, "\n"); + + const blocks = buffer.split("\n\n"); + buffer = blocks.pop() ?? ""; + + for (const block of blocks) { + if (!block.trim()) continue; + if (block.startsWith(":")) continue; + + const dataLines: string[] = []; + for (const rawLine of block.split("\n")) { + if (rawLine.startsWith(":")) continue; + if (!rawLine.startsWith("data:")) continue; + dataLines.push(rawLine.slice(5).replace(/^\s*/, "")); + } + + if (dataLines.length === 0) continue; + + const dataStr = dataLines.join("\n"); + if (dataStr === "[DONE]") continue; + + let parsed: unknown; + try { + parsed = JSON.parse(dataStr); + } catch (e) { + const msg = e instanceof Error ? 
e.message : "JSON parse error"; + throw new Error(`Parse error: ${msg}`); + } + + if (!isValidSidebarEventEnvelope(parsed)) { + throw new Error("Invalid event structure"); + } + + if (parsed.category === "snapshot") { + state.lastSeq = parsed.seq; + } else if (state.lastSeq >= 0 && parsed.seq !== state.lastSeq + 1) { + throw new Error( + `Seq gap: expected ${state.lastSeq + 1}, got ${parsed.seq}`, + ); + } else { + state.lastSeq = parsed.seq; + } + + callbacks.onEvent(parsed); + } + } + } finally { + await reader.cancel().catch(() => { + // Ignore cancel errors - connection already closed + }); + } + + cleanup(); + }) + .catch((err: unknown) => { + const error = err as Error; + if (error.name !== "AbortError") { + callbacks.onError(error); + } + cleanup(); + }); + + return cleanup; +} diff --git a/refact-agent/gui/src/services/refact/tasks.ts b/refact-agent/gui/src/services/refact/tasks.ts new file mode 100644 index 000000000..11b9e32f0 --- /dev/null +++ b/refact-agent/gui/src/services/refact/tasks.ts @@ -0,0 +1,329 @@ +import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; +import { RootState } from "../../app/store"; + +export interface TaskMeta { + id: string; + name: string; + status: "planning" | "active" | "paused" | "completed" | "abandoned"; + created_at: string; + updated_at: string; + cards_total: number; + cards_done: number; + cards_failed: number; + agents_active: number; + base_branch?: string; + base_commit?: string; + default_agent_model?: string; + planner_session_state?: + | "idle" + | "generating" + | "executing_tools" + | "paused" + | "waiting_ide" + | "error"; +} + +export interface BoardColumn { + id: string; + title: string; +} + +export interface StatusUpdate { + timestamp: string; + message: string; +} + +export interface BoardCard { + id: string; + title: string; + column: string; + priority: string; + depends_on: string[]; + instructions: string; + assignee: string | null; + agent_chat_id: string | null; + status_updates: StatusUpdate[]; + final_report: string | null; + created_at: string; + started_at: string | null; + completed_at: string | null; + agent_branch?: string; + agent_worktree?: string; +} + +export interface TaskBoard { + schema_version: number; + rev: number; + columns: BoardColumn[]; + cards: BoardCard[]; +} + +export interface ReadyCardsResult { + ready: string[]; + blocked: string[]; + in_progress: string[]; + completed: string[]; + failed: string[]; +} + +export interface TrajectoryInfo { + id: string; + title: string; + created_at: string; + updated_at: string; + session_state?: string; +} + +export const tasksApi = createApi({ + reducerPath: "tasksApi", + baseQuery: fetchBaseQuery({ + prepareHeaders: (headers, { getState }) => { + const token = (getState() as RootState).config.apiKey; + if (token) { + headers.set("Authorization", `Bearer ${token}`); + } + return headers; + }, + }), + tagTypes: ["Tasks", "Board"], + endpoints: (builder) => ({ + listTasks: builder.query<TaskMeta[], undefined>({ + queryFn: async (_args, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks`, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TaskMeta[] }; + }, + providesTags: ["Tasks"], + }), + + createTask: builder.mutation<TaskMeta, { name: string }>({ + queryFn: async (args, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = 
state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks`, + method: "POST", + body: args, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TaskMeta }; + }, + invalidatesTags: ["Tasks"], + }), + + getTask: builder.query<TaskMeta, string>({ + queryFn: async (taskId, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}`, + }); + if (result.error) return { error: result.error }; + const response = result.data as { meta: TaskMeta }; + return { data: response.meta }; + }, + providesTags: (_result, _error, taskId) => [ + { type: "Tasks", id: taskId }, + ], + }), + + deleteTask: builder.mutation<{ deleted: boolean }, string>({ + queryFn: async (taskId, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}`, + method: "DELETE", + }); + if (result.error) return { error: result.error }; + return { data: { deleted: true } }; + }, + invalidatesTags: ["Tasks"], + }), + + updateTaskStatus: builder.mutation< + TaskMeta, + { taskId: string; status: TaskMeta["status"] } + >({ + queryFn: async ({ taskId, status }, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/status`, + method: "POST", + body: { status }, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TaskMeta }; + }, + invalidatesTags: (_result, _error, { taskId }) => [ + { type: "Tasks", id: taskId }, + "Tasks", + ], + }), + + getBoard: builder.query<TaskBoard, string>({ + queryFn: async (taskId, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/board`, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TaskBoard }; + }, + providesTags: (_result, _error, taskId) => [ + { type: "Board", id: taskId }, + ], + }), + + patchBoard: builder.mutation< + TaskBoard, + { taskId: string; board: Partial<TaskBoard> } + >({ + queryFn: async ({ taskId, board }, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/board`, + method: "POST", + body: board, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TaskBoard }; + }, + invalidatesTags: (_result, _error, { taskId }) => [ + { type: "Board", id: taskId }, + ], + }), + + getReadyCards: builder.query<ReadyCardsResult, string>({ + queryFn: async (taskId, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/board/ready`, + }); + if (result.error) return { error: result.error }; + return { data: result.data as ReadyCardsResult }; + }, + }), + + getOrchestratorInstructions: builder.query<string, string>({ + queryFn: async (taskId, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await 
baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/planner-instructions`, + }); + if (result.error) return { error: result.error }; + const payload = result.data as { content?: unknown }; + return { + data: typeof payload.content === "string" ? payload.content : "", + }; + }, + }), + + setOrchestratorInstructions: builder.mutation< + { saved: boolean }, + { taskId: string; content: string } + >({ + queryFn: async ({ taskId, content }, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/planner-instructions`, + method: "PUT", + body: { content }, + }); + if (result.error) return { error: result.error }; + return { data: { saved: true } }; + }, + }), + + listTaskTrajectories: builder.query< + TrajectoryInfo[], + { taskId: string; role: string } + >({ + queryFn: async ({ taskId, role }, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/trajectories/${role}`, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TrajectoryInfo[] }; + }, + }), + + createPlannerChat: builder.mutation<{ chat_id: string }, string>({ + queryFn: async (taskId, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/planner-chats`, + method: "POST", + }); + if (result.error) return { error: result.error }; + return { data: result.data as { chat_id: string } }; + }, + }), + + updateTaskMeta: builder.mutation< + TaskMeta, + { + taskId: string; + name?: string; + baseBranch?: string; + baseCommit?: string; + defaultAgentModel?: string; + } + >({ + queryFn: async ( + { taskId, name, baseBranch, baseCommit, defaultAgentModel }, + api, + _opts, + baseQuery, + ) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const body: Record<string, string> = {}; + if (name !== undefined) body.name = name; + if (baseBranch !== undefined) body.base_branch = baseBranch; + if (baseCommit !== undefined) body.base_commit = baseCommit; + if (defaultAgentModel !== undefined) + body.default_agent_model = defaultAgentModel; + const result = await baseQuery({ + url: `http://127.0.0.1:${port}/v1/tasks/${taskId}/meta`, + method: "PATCH", + body, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TaskMeta }; + }, + invalidatesTags: (_result, _error, { taskId }) => [ + { type: "Tasks", id: taskId }, + ], + }), + }), +}); + +export const { + useListTasksQuery, + useCreateTaskMutation, + useGetTaskQuery, + useDeleteTaskMutation, + useUpdateTaskStatusMutation, + useUpdateTaskMetaMutation, + useGetBoardQuery, + usePatchBoardMutation, + useGetReadyCardsQuery, + useGetOrchestratorInstructionsQuery, + useSetOrchestratorInstructionsMutation, + useListTaskTrajectoriesQuery, + useCreatePlannerChatMutation, +} = tasksApi; diff --git a/refact-agent/gui/src/services/refact/tools.ts b/refact-agent/gui/src/services/refact/tools.ts index 1411b06e0..dce15dd31 100644 --- a/refact-agent/gui/src/services/refact/tools.ts +++ b/refact-agent/gui/src/services/refact/tools.ts @@ -196,6 +196,7 @@ export type ToolSpec = { parameters_required?: string[]; agentic: boolean; experimental?: boolean; + allow_parallel?: boolean; }; 
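The `allow_parallel?: boolean` flag added to `ToolSpec` just above suggests callers can now decide which tool calls are safe to batch. A minimal sketch, assuming a sibling module that imports the exported `ToolSpec` type; the helper name `splitByParallelism` is illustrative and not part of this change:

```typescript
import type { ToolSpec } from "./tools";

// Group tool specs by whether the engine marks them as safe to run in parallel.
// A missing flag is treated conservatively as sequential.
function splitByParallelism(specs: ToolSpec[]): {
  parallel: ToolSpec[];
  sequential: ToolSpec[];
} {
  const parallel: ToolSpec[] = [];
  const sequential: ToolSpec[] = [];
  for (const spec of specs) {
    (spec.allow_parallel ? parallel : sequential).push(spec);
  }
  return { parallel, sequential };
}
```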
export type Tool = { @@ -204,7 +205,9 @@ export type Tool = { }; export type ToolConfirmationPauseReason = { - type: "confirmation" | "denial"; + type: "confirmation" | "denial" | "unknown"; + raw_type?: string; + tool_name: string; command: string; rule: string; tool_call_id: string; diff --git a/refact-agent/gui/src/services/refact/trajectories.ts b/refact-agent/gui/src/services/refact/trajectories.ts new file mode 100644 index 000000000..435fcca9a --- /dev/null +++ b/refact-agent/gui/src/services/refact/trajectories.ts @@ -0,0 +1,249 @@ +import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; +import { ChatThread } from "../../features/Chat/Thread/types"; +import { ChatMessages } from "./types"; +import { RootState } from "../../app/store"; + +export type TrajectoryMeta = { + id: string; + title: string; + created_at: string; + updated_at: string; + model: string; + mode: string; + message_count: number; + parent_id?: string; + link_type?: string; + task_id?: string; + task_role?: string; + agent_id?: string; + card_id?: string; + session_state?: + | "idle" + | "generating" + | "executing_tools" + | "paused" + | "waiting_ide" + | "waiting_user_input" + | "completed" + | "error"; + root_chat_id?: string; + total_coins?: number; + total_lines_added: number; + total_lines_removed: number; + tasks_total: number; + tasks_done: number; + tasks_failed: number; +}; + +export type TrajectoryData = { + id: string; + title: string; + created_at: string; + updated_at: string; + model: string; + mode: string; + tool_use: string; + messages: ChatMessages; + boost_reasoning?: boolean; + context_tokens_cap?: number; + include_project_info?: boolean; + increase_max_tokens?: boolean; + project_name?: string; + isTitleGenerated?: boolean; +}; + +export type TrajectoryEvent = { + type: "created" | "updated" | "deleted"; + id: string; + updated_at?: string; + title?: string; + is_title_generated?: boolean; + session_state?: + | "idle" + | "generating" + | "executing_tools" + | "paused" + | "waiting_ide" + | "waiting_user_input" + | "completed" + | "error"; + error?: string; + message_count?: number; + parent_id?: string; + link_type?: string; + root_chat_id?: string; + model?: string; + mode?: string; + total_coins?: number; + total_lines_added?: number; + total_lines_removed?: number; + tasks_total?: number; + tasks_done?: number; + tasks_failed?: number; +}; + +export type PaginatedTrajectories = { + items: TrajectoryMeta[]; + next_cursor: string | null; + has_more: boolean; + total_count: number; +}; + +export type TrajectoriesListParams = { + limit?: number; + cursor?: string; +}; + +export function chatThreadToTrajectoryData( + thread: ChatThread, + createdAt?: string, +): TrajectoryData { + const now = new Date().toISOString(); + return { + id: thread.id, + title: thread.title ?? "New Chat", + created_at: createdAt ?? now, + updated_at: now, + model: thread.model, + mode: thread.mode ?? "AGENT", + tool_use: thread.tool_use ?? 
"agent", + messages: thread.messages, + boost_reasoning: thread.boost_reasoning, + context_tokens_cap: thread.context_tokens_cap, + include_project_info: thread.include_project_info, + increase_max_tokens: thread.increase_max_tokens, + project_name: thread.project_name, + isTitleGenerated: thread.isTitleGenerated, + }; +} + +export function trajectoryDataToChatThread(data: TrajectoryData): ChatThread { + return { + id: data.id, + title: data.title, + model: data.model, + mode: data.mode as ChatThread["mode"], + tool_use: data.tool_use as ChatThread["tool_use"], + messages: data.messages, + boost_reasoning: data.boost_reasoning ?? false, + context_tokens_cap: data.context_tokens_cap, + include_project_info: data.include_project_info ?? true, + increase_max_tokens: data.increase_max_tokens ?? false, + project_name: data.project_name, + isTitleGenerated: data.isTitleGenerated, + createdAt: data.created_at, + last_user_message_id: "", + new_chat_suggested: { wasSuggested: false }, + }; +} + +export const trajectoriesApi = createApi({ + reducerPath: "trajectoriesApi", + baseQuery: fetchBaseQuery({ + prepareHeaders: (headers, { getState }) => { + const token = (getState() as RootState).config.apiKey; + if (token) { + headers.set("Authorization", `Bearer ${token}`); + } + return headers; + }, + }), + tagTypes: ["Trajectory"], + endpoints: (builder) => ({ + listTrajectoriesFirstPage: builder.query<TrajectoryMeta[], undefined>({ + queryFn: async (_args, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = `http://127.0.0.1:${port}/v1/trajectories`; + const result = await baseQuery({ url }); + if (result.error) return { error: result.error }; + const response = result.data as PaginatedTrajectories; + return { data: response.items }; + }, + providesTags: ["Trajectory"], + }), + listTrajectoriesPaginated: builder.query< + PaginatedTrajectories, + TrajectoriesListParams | undefined + >({ + queryFn: async (args, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const params = new URLSearchParams(); + if (args?.limit) params.set("limit", String(args.limit)); + if (args?.cursor) params.set("cursor", args.cursor); + const queryString = params.toString(); + const url = `http://127.0.0.1:${port}/v1/trajectories${ + queryString ? 
`?${queryString}` : "" + }`; + const result = await baseQuery({ url }); + if (result.error) return { error: result.error }; + return { data: result.data as PaginatedTrajectories }; + }, + providesTags: ["Trajectory"], + }), + listAllTrajectories: builder.query<TrajectoryMeta[], undefined>({ + queryFn: async (_args, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = `http://127.0.0.1:${port}/v1/trajectories/all`; + const result = await baseQuery({ url }); + if (result.error) return { error: result.error }; + return { data: result.data as TrajectoryMeta[] }; + }, + providesTags: ["Trajectory"], + }), + getTrajectory: builder.query<TrajectoryData, string>({ + queryFn: async (id, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = `http://127.0.0.1:${port}/v1/trajectories/${id}`; + const result = await baseQuery({ url }); + if (result.error) return { error: result.error }; + return { data: result.data as TrajectoryData }; + }, + providesTags: (_result, _error, id) => [{ type: "Trajectory", id }], + }), + saveTrajectory: builder.mutation<undefined, TrajectoryData>({ + queryFn: async (data, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = `http://127.0.0.1:${port}/v1/trajectories/${data.id}`; + const result = await baseQuery({ + url, + method: "PUT", + body: data, + }); + if (result.error) return { error: result.error }; + return { data: undefined }; + }, + invalidatesTags: (_result, _error, data) => [ + { type: "Trajectory", id: data.id }, + "Trajectory", + ], + }), + deleteTrajectory: builder.mutation<undefined, string>({ + queryFn: async (id, api, _opts, baseQuery) => { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = `http://127.0.0.1:${port}/v1/trajectories/${id}`; + const result = await baseQuery({ + url, + method: "DELETE", + }); + if (result.error) return { error: result.error }; + return { data: undefined }; + }, + invalidatesTags: ["Trajectory"], + }), + }), +}); + +export const { + useListTrajectoriesFirstPageQuery, + useListTrajectoriesPaginatedQuery, + useListAllTrajectoriesQuery, + useGetTrajectoryQuery, + useSaveTrajectoryMutation, + useDeleteTrajectoryMutation, +} = trajectoriesApi; diff --git a/refact-agent/gui/src/services/refact/trajectory.ts b/refact-agent/gui/src/services/refact/trajectory.ts new file mode 100644 index 000000000..d5ea1269e --- /dev/null +++ b/refact-agent/gui/src/services/refact/trajectory.ts @@ -0,0 +1,195 @@ +import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react"; +import { RootState } from "../../app/store"; +import { + TRAJECTORY_TRANSFORM_PREVIEW_URL, + TRAJECTORY_TRANSFORM_APPLY_URL, + TRAJECTORY_HANDOFF_PREVIEW_URL, + TRAJECTORY_HANDOFF_APPLY_URL, + TRAJECTORY_MODE_TRANSITION_APPLY_URL, +} from "./consts"; + +export type TransformOptions = { + dedup_and_compress_context?: boolean; + drop_all_context?: boolean; + compress_non_agentic_tools?: boolean; + drop_all_memories?: boolean; + drop_project_information?: boolean; +}; + +export type HandoffOptions = { + include_last_user_plus?: boolean; + include_all_opened_context?: boolean; + include_all_edited_context?: boolean; + include_agentic_tools?: boolean; + llm_summary_for_excluded?: boolean; + include_all_user_assistant_only?: boolean; +}; + +export type TransformStats = { + before_message_count: number; + 
after_message_count: number; + before_approx_tokens: number; + after_approx_tokens: number; + context_messages_modified: number; + tool_messages_modified: number; +}; + +export type TransformPreviewResponse = { + stats: TransformStats; + actions: string[]; +}; + +export type TransformApplyResponse = { + stats: TransformStats; +}; + +export type HandoffPreviewResponse = { + stats: TransformStats; + actions: string[]; + llm_summary?: string | null; +}; + +export type HandoffApplyResponse = { + new_chat_id: string; + stats: TransformStats; +}; + +export type ModeTransitionApplyResponse = { + new_chat_id: string; + messages_count: number; +}; + +function buildUrl(template: string, chatId: string, port: number): string { + return `http://127.0.0.1:${port}${template.replace( + "{chat_id}", + encodeURIComponent(chatId), + )}`; +} + +export const trajectoryApi = createApi({ + reducerPath: "trajectoryApi", + baseQuery: fetchBaseQuery({ + prepareHeaders: (headers, { getState }) => { + const token = (getState() as RootState).config.apiKey; + if (token) { + headers.set("Authorization", `Bearer ${token}`); + } + return headers; + }, + }), + endpoints: (builder) => ({ + previewTransform: builder.mutation< + TransformPreviewResponse, + { chatId: string; options: TransformOptions } + >({ + async queryFn({ chatId, options }, api, _opts, baseQuery) { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = buildUrl(TRAJECTORY_TRANSFORM_PREVIEW_URL, chatId, port); + const result = await baseQuery({ + url, + method: "POST", + body: { options }, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TransformPreviewResponse }; + }, + }), + + applyTransform: builder.mutation< + TransformApplyResponse, + { chatId: string; options: TransformOptions } + >({ + async queryFn({ chatId, options }, api, _opts, baseQuery) { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = buildUrl(TRAJECTORY_TRANSFORM_APPLY_URL, chatId, port); + const result = await baseQuery({ + url, + method: "POST", + body: { options }, + }); + if (result.error) return { error: result.error }; + return { data: result.data as TransformApplyResponse }; + }, + }), + + previewHandoff: builder.mutation< + HandoffPreviewResponse, + { chatId: string; options: HandoffOptions } + >({ + async queryFn({ chatId, options }, api, _opts, baseQuery) { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = buildUrl(TRAJECTORY_HANDOFF_PREVIEW_URL, chatId, port); + const result = await baseQuery({ + url, + method: "POST", + body: { options }, + }); + if (result.error) return { error: result.error }; + return { data: result.data as HandoffPreviewResponse }; + }, + }), + + applyHandoff: builder.mutation< + HandoffApplyResponse, + { chatId: string; options: HandoffOptions } + >({ + async queryFn({ chatId, options }, api, _opts, baseQuery) { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = buildUrl(TRAJECTORY_HANDOFF_APPLY_URL, chatId, port); + const result = await baseQuery({ + url, + method: "POST", + body: { options }, + }); + if (result.error) return { error: result.error }; + return { data: result.data as HandoffApplyResponse }; + }, + }), + + applyModeTransition: builder.mutation< + ModeTransitionApplyResponse, + { + chatId: string; + targetMode: string; + targetModeDescription?: string; + } + >({ + async queryFn( + { chatId, targetMode, targetModeDescription }, 
+ api, + _opts, + baseQuery, + ) { + const state = api.getState() as RootState; + const port = state.config.lspPort; + const url = buildUrl( + TRAJECTORY_MODE_TRANSITION_APPLY_URL, + chatId, + port, + ); + const result = await baseQuery({ + url, + method: "POST", + body: { + target_mode: targetMode, + target_mode_description: targetModeDescription ?? "", + }, + }); + if (result.error) return { error: result.error }; + return { data: result.data as ModeTransitionApplyResponse }; + }, + }), + }), +}); + +export const { + usePreviewTransformMutation, + useApplyTransformMutation, + usePreviewHandoffMutation, + useApplyHandoffMutation, + useApplyModeTransitionMutation, +} = trajectoryApi; diff --git a/refact-agent/gui/src/services/refact/types.ts b/refact-agent/gui/src/services/refact/types.ts index 1a837c77b..f027b567e 100644 --- a/refact-agent/gui/src/services/refact/types.ts +++ b/refact-agent/gui/src/services/refact/types.ts @@ -1,6 +1,6 @@ import { LspChatMode } from "../../features/Chat"; import { Checkpoint } from "../../features/Checkpoints/types"; -import { GetChatTitleActionPayload, GetChatTitleResponse, Usage } from "./chat"; +import { Usage } from "./chat"; import { MCPArgs, MCPEnvs } from "./integrations"; export type ChatRole = @@ -25,14 +25,15 @@ export type ChatContextFile = { export type ToolCall = { function: { - arguments: string; // stringed json - name?: string; // will be present when it's new + arguments: string; + name?: string; }; index: number; type?: "function"; id?: string; attached_files?: string[]; subchat?: string; + subchat_log?: string[]; }; export type ToolUsage = { @@ -68,7 +69,7 @@ export function isToolContent(json: unknown): json is ToolContent { } export interface BaseToolResult { tool_call_id: string; - finish_reason?: string; // "call_failed" | "call_worked"; + finish_reason?: string; content: ToolContent; compression_strength?: CompressionStrength; tool_failed?: boolean; @@ -84,8 +85,8 @@ export interface MultiModalToolResult extends BaseToolResult { export type ToolResult = SingleModelToolResult | MultiModalToolResult; export type MultiModalToolContent = { - m_type: string; // "image/*" | "text" ... maybe narrow this? 
- m_content: string; // base64 if image, + m_type: string; + m_content: string; }; export function isMultiModalToolContent( @@ -117,10 +118,11 @@ export function isSingleModelToolResult(toolResult: ToolResult) { interface BaseMessage { role: ChatRole; + message_id?: string; content: | string | ChatContextFile[] - | ToolResult + | MultiModalToolContent[] | DiffChunk[] | null | (UserMessageContentWithImage | ProcessedUserMessageContentWithImages)[]; @@ -129,6 +131,7 @@ interface BaseMessage { export interface ChatContextFileMessage extends BaseMessage { role: "context_file"; content: ChatContextFile[]; + tool_call_id?: string; } export type UserImage = { @@ -166,13 +169,15 @@ export type WebSearchCitation = { export interface AssistantMessage extends BaseMessage, CostInfo { role: "assistant"; content: string | null; - reasoning_content?: string | null; // NOTE: only for internal UI usage, don't send it back + reasoning_content?: string | null; tool_calls?: ToolCall[] | null; - server_executed_tools?: ToolCall[] | null; // Tools executed by the provider (srvtoolu_*), for display only + server_executed_tools?: ToolCall[] | null; + server_content_blocks?: unknown[] | null; thinking_blocks?: ThinkingBlock[] | null; - citations?: WebSearchCitation[] | null; // Citations from server-executed tools like web_search - finish_reason?: "stop" | "length" | "abort" | "tool_calls" | null; + citations?: WebSearchCitation[] | null; + finish_reason?: "stop" | "length" | "abort" | "tool_calls" | "error" | null; usage?: Usage | null; + extra?: Record<string, unknown>; } export interface ToolCallMessage extends AssistantMessage { @@ -186,10 +191,12 @@ export interface SystemMessage extends BaseMessage { export interface ToolMessage extends BaseMessage { role: "tool"; - content: ToolResult; + content: string | MultiModalToolContent[]; + tool_call_id: string; + tool_failed?: boolean; + compression_strength?: CompressionStrength; } -// TODO: There maybe sub-types for this export type DiffChunk = { file_name: string; file_action: string; @@ -199,8 +206,6 @@ export type DiffChunk = { lines_add: string; file_name_rename?: string | null; application_details?: string; - // apply?: boolean; - // chunk_id?: number; }; export function isDiffChunk(json: unknown) { @@ -272,7 +277,7 @@ export type ChatMeta = { export function isChatContextFileMessage( message: ChatMessage, ): message is ChatContextFileMessage { - return message.role === "context_file"; + return message.role === "context_file" && Array.isArray(message.content); } export function isAssistantMessage( @@ -301,7 +306,6 @@ export function isToolCallMessage( if (!isAssistantMessage(message)) return false; const tool_calls = message.tool_calls; if (!tool_calls) return false; - // TODO: check browser support of every return tool_calls.every(isToolCall); } @@ -323,15 +327,12 @@ interface BaseDelta { citation?: WebSearchCitation; thinking_blocks?: ThinkingBlock[]; } | null; - // refusal?: null; - // function_call?: null; - // audio?: null; } interface AssistantDelta extends BaseDelta { role?: "assistant" | null; - content?: string | null; // might be undefined, will be null if tool_calls - reasoning_content?: string | null; // NOTE: only for internal UI usage, don't send it back + content?: string | null; + reasoning_content?: string | null; tool_calls?: ToolCall[] | null; thinking_blocks?: ThinkingBlock[] | null; } @@ -389,7 +390,7 @@ export type ThinkingBlock = { interface ThinkingBlocksDelta extends BaseDelta { thinking_blocks?: ThinkingBlock[]; - 
reasoning_content?: string | null; // NOTE: only for internal UI usage, don't send it back + reasoning_content?: string | null; } export function isThinkingBlocksDelta( @@ -421,10 +422,8 @@ type Delta = export type ChatChoice = { delta: Delta; - finish_reason?: "stop" | "length" | "abort" | "tool_calls" | null; + finish_reason?: "stop" | "length" | "abort" | "tool_calls" | "error" | null; index: number; - // TODO: what's this for? - // logprobs?: null; }; export type ChatUserMessageResponse = @@ -473,36 +472,6 @@ export type UserMessageResponse = ChatUserMessageResponse & { role: "user"; }; -export function isChatGetTitleResponse( - json: unknown, -): json is GetChatTitleResponse { - if (!json || typeof json !== "object") return false; - - const requiredKeys = [ - "id", - "choices", - // "metering_balance", // not in BYOK - "model", - "object", - "system_fingerprint", - "usage", - "created", - "deterministic_messages", - ]; - - return requiredKeys.every((key) => key in json); -} - -export function isChatGetTitleActionPayload( - json: unknown, -): json is GetChatTitleActionPayload { - if (!json || typeof json !== "object") return false; - - const requiredKeys = ["title", "chatId"]; - - return requiredKeys.every((key) => key in json); -} - export function isUserResponse(json: unknown): json is UserMessageResponse { if (!isChatUserMessageResponse(json)) return false; return json.role === "user"; @@ -548,7 +517,6 @@ export function isContextMemoryResponse( export function isToolResponse(json: unknown): json is ToolResponse { if (!json) return false; if (typeof json !== "object") return false; - // if (!("id" in json)) return false; if (!("content" in json)) return false; if (!("role" in json)) return false; if (!("tool_call_id" in json)) return false; @@ -556,8 +524,6 @@ export function isToolResponse(json: unknown): json is ToolResponse { return json.role === "tool"; } -// TODO: isThinkingBlocksResponse - export type DiffResponse = { role: "diff"; content: string; @@ -616,6 +582,9 @@ export function isCDInstructionResponse( return json.role === "cd_instruction"; } +import type { MeteringUsd } from "./chat"; +export type { MeteringUsd }; + type CostInfo = { metering_prompt_tokens_n?: number; metering_generated_tokens_n?: number; @@ -628,6 +597,8 @@ type CostInfo = { metering_coins_generated?: number; metering_coins_cache_creation?: number; metering_coins_cache_read?: number; + + metering_usd?: MeteringUsd; }; type ChatResponseChoice = { @@ -647,7 +618,6 @@ export function isChatResponseChoice( return true; } -// TODO: type checks for this. export type CompressionStrength = "absent" | "low" | "medium" | "high"; export type ChatResponse = | ChatResponseChoice @@ -665,32 +635,79 @@ export function areAllFieldsBoolean( ); } -export type MemoRecord = { +export type VecDbMemoRecord = { memid: string; - thevec?: number[]; // are options nullable? 
+ thevec?: number[]; distance?: number; m_type: string; m_goal: string; m_project: string; m_payload: string; m_origin: string; - // mstat_correct: bigint, - // mstat_relevant: bigint, mstat_correct: number; mstat_relevant: number; mstat_times_used: number; }; + +export type KnowledgeMemoRecord = { + memid: string; + tags: string[]; + content: string; + file_path?: string; + line_range?: [number, number]; + title?: string; + created?: string; + kind?: string; + score?: number; +}; + +export type MemoRecord = KnowledgeMemoRecord; + export function isMemoRecord(obj: unknown): obj is MemoRecord { if (!obj) return false; if (typeof obj !== "object") return false; if (!("memid" in obj) || typeof obj.memid !== "string") return false; - // TODO: other checks return true; } +export type KnowledgeGraphNode = { + id: string; + node_type: string; + label: string; + title?: string; + content?: string; + tags?: string[]; + created?: string; + file_path?: string; + kind?: string; +}; + +export type KnowledgeGraphEdge = { + source: string; + target: string; + edge_type: string; +}; + +export type KnowledgeGraphStats = { + doc_count: number; + tag_count: number; + file_count: number; + entity_count: number; + edge_count: number; + active_docs: number; + deprecated_docs: number; + trajectory_count: number; +}; + +export type KnowledgeGraphResponse = { + nodes: KnowledgeGraphNode[]; + edges: KnowledgeGraphEdge[]; + stats: KnowledgeGraphStats; +}; + export type VecDbStatus = { files_unprocessed: number; - files_total: number; // only valid for status bar in the UI, resets to 0 when done + files_total: number; requests_made_since_start: number; vectors_made_since_start: number; db_size: number; diff --git a/refact-agent/gui/src/services/refact/voice.ts b/refact-agent/gui/src/services/refact/voice.ts new file mode 100644 index 000000000..72e13c793 --- /dev/null +++ b/refact-agent/gui/src/services/refact/voice.ts @@ -0,0 +1,157 @@ +function getVoiceApiBase(port: number): string { + return `http://127.0.0.1:${port}/v1/voice`; +} + +export interface TranscribeRequest { + audio_data: string; + mime_type?: string; + language?: string; +} + +export interface TranscribeResponse { + text: string; + language: string; + duration_ms: number; +} + +export interface VoiceStatusResponse { + enabled: boolean; + model_loaded: boolean; + model_name: string; + is_downloading: boolean; + download_progress: number; +} + +export interface DownloadModelRequest { + model?: string; +} + +export interface DownloadModelResponse { + success: boolean; + message: string; +} + +export async function transcribeAudio( + port: number, + request: TranscribeRequest, +): Promise<TranscribeResponse> { + const response = await fetch(`${getVoiceApiBase(port)}/transcribe`, { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(request), + }); + + if (!response.ok) { + const error = await response.text(); + throw new Error(error || "Transcription failed"); + } + + return response.json() as Promise<TranscribeResponse>; +} + +export async function getVoiceStatus( + port: number, +): Promise<VoiceStatusResponse> { + const response = await fetch(`${getVoiceApiBase(port)}/status`); + if (!response.ok) { + throw new Error("Failed to get voice status"); + } + return response.json() as Promise<VoiceStatusResponse>; +} + +export async function downloadVoiceModel( + port: number, + model?: string, +): Promise<DownloadModelResponse> { + const response = await fetch(`${getVoiceApiBase(port)}/download`, { + method: "POST", + 
headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ model: model ?? "base.en" }), + }); + + if (!response.ok) { + const error = await response.text(); + throw new Error(error || "Download failed"); + } + + return response.json() as Promise<DownloadModelResponse>; +} + +export interface StreamingTranscriptEvent { + type: "transcript"; + session_id: string; + text: string; + is_final: boolean; + duration_ms: number; +} + +export interface StreamingErrorEvent { + type: "error"; + message: string; +} + +export interface StreamingEndedEvent { + type: "ended"; +} + +export type VoiceStreamEvent = + | StreamingTranscriptEvent + | StreamingErrorEvent + | StreamingEndedEvent; + +export function subscribeToVoiceStream( + port: number, + sessionId: string, + language: string | undefined, + onEvent: (event: VoiceStreamEvent) => void, + onError?: (error: Error) => void, +): () => void { + const params = new URLSearchParams(); + if (language) params.set("language", language); + const url = `${getVoiceApiBase( + port, + )}/stream/${sessionId}/subscribe?${params.toString()}`; + + const eventSource = new EventSource(url); + + eventSource.onmessage = (e) => { + const event = JSON.parse(e.data as string) as VoiceStreamEvent; + onEvent(event); + if (event.type === "ended") { + eventSource.close(); + } + }; + + eventSource.onerror = () => { + onError?.(new Error("Stream connection error")); + eventSource.close(); + }; + + return () => eventSource.close(); +} + +export async function sendVoiceChunk( + port: number, + sessionId: string, + audioData: string, + isFinal: boolean, + language?: string, +): Promise<void> { + const response = await fetch( + `${getVoiceApiBase(port)}/stream/${sessionId}/chunk`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + audio_data: audioData, + is_final: isFinal, + language, + }), + }, + ); + + if (!response.ok) { + const error = await response.text(); + throw new Error(error || "Failed to send chunk"); + } +} diff --git a/refact-agent/gui/src/services/smallcloud/index.ts b/refact-agent/gui/src/services/smallcloud/index.ts index c5b99c0fd..d6aa151ab 100644 --- a/refact-agent/gui/src/services/smallcloud/index.ts +++ b/refact-agent/gui/src/services/smallcloud/index.ts @@ -93,6 +93,7 @@ export const smallCloudApi = createApi({ return response; }, providesTags: ["User"], + keepUnusedDataFor: 5 * 60, }), getSurvey: builder.query<SurveyQuestions, undefined>({ diff --git a/refact-agent/gui/src/types/cytoscape-fcose.d.ts b/refact-agent/gui/src/types/cytoscape-fcose.d.ts new file mode 100644 index 000000000..e7c62bfb0 --- /dev/null +++ b/refact-agent/gui/src/types/cytoscape-fcose.d.ts @@ -0,0 +1,6 @@ +declare module "cytoscape-fcose" { + import type Cytoscape from "cytoscape"; + + const fcose: Cytoscape.Ext; + export default fcose; +} diff --git a/refact-agent/gui/src/types/react-cytoscapejs.d.ts b/refact-agent/gui/src/types/react-cytoscapejs.d.ts new file mode 100644 index 000000000..487803a1a --- /dev/null +++ b/refact-agent/gui/src/types/react-cytoscapejs.d.ts @@ -0,0 +1,19 @@ +declare module "react-cytoscapejs" { + import type Cytoscape from "cytoscape"; + import type { CSSProperties } from "react"; + + export interface CytoscapeComponentProps { + elements: { + data: Record<string, unknown>; + group?: "nodes" | "edges"; + }[]; + style?: CSSProperties; + stylesheet?: Cytoscape.StylesheetStyle[]; + layout?: Cytoscape.LayoutOptions; + cy?: (cy: Cytoscape.Core) => void; + className?: string; + } + + const 
CytoscapeComponent: React.FC<CytoscapeComponentProps>; + export default CytoscapeComponent; +} diff --git a/refact-agent/gui/src/utils/__tests__/getMetering.test.ts b/refact-agent/gui/src/utils/__tests__/getMetering.test.ts new file mode 100644 index 000000000..cfab6bfb2 --- /dev/null +++ b/refact-agent/gui/src/utils/__tests__/getMetering.test.ts @@ -0,0 +1,375 @@ +import { describe, it, expect } from "vitest"; +import { + getTotalCostMeteringForMessages, + getTotalTokenMeteringForMessages, +} from "../getMetering"; +import type { ChatMessages } from "../../services/refact/types"; + +type MeteringExtra = { + metering_coins_prompt?: number | string; + metering_coins_generated?: number | string; + metering_coins_cache_creation?: number | string; + metering_coins_cache_read?: number | string; + metering_prompt_tokens_n?: number; + metering_generated_tokens_n?: number; + metering_cache_creation_tokens_n?: number; + metering_cache_read_tokens_n?: number; +}; + +type MessageWithExtra = { + role: "assistant"; + content: string; + usage?: { + completion_tokens: number; + prompt_tokens: number; + total_tokens: number; + }; + tool_calls?: { + id: string; + function: { name: string; arguments: string }; + index: number; + }[]; + metering_coins_prompt?: number; + metering_coins_generated?: number; + metering_coins_cache_creation?: number; + metering_coins_cache_read?: number; + extra?: MeteringExtra; +}; + +type ToolMessageWithExtra = { + role: "tool"; + content: string; + tool_call_id: string; + extra?: MeteringExtra; +}; + +describe("getMetering", () => { + describe("getTotalCostMeteringForMessages", () => { + it("should extract metering from message.extra (new format)", () => { + const messages = [ + { role: "user", content: "Hello" }, + { + role: "assistant", + content: "Hi there", + usage: { + completion_tokens: 10, + prompt_tokens: 20, + total_tokens: 10 + 20, + }, + extra: { + metering_coins_prompt: 100, + metering_coins_generated: 50, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + }, + } satisfies MessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalCostMeteringForMessages(messages); + + expect(result).toEqual({ + metering_coins_prompt: 100, + metering_coins_generated: 50, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + }); + }); + + it("should extract metering from direct properties (legacy format)", () => { + const messages = [ + { role: "user", content: "Hello" }, + { + role: "assistant", + content: "Hi there", + usage: { + completion_tokens: 10, + prompt_tokens: 20, + total_tokens: 10 + 20, + }, + metering_coins_prompt: 200, + metering_coins_generated: 100, + metering_coins_cache_creation: 10, + metering_coins_cache_read: 5, + } satisfies MessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalCostMeteringForMessages(messages); + + expect(result).toEqual({ + metering_coins_prompt: 200, + metering_coins_generated: 100, + metering_coins_cache_creation: 10, + metering_coins_cache_read: 5, + }); + }); + + it("should prefer direct properties over extra (backward compatibility)", () => { + const messages = [ + { + role: "assistant", + content: "Test", + usage: { + completion_tokens: 10, + prompt_tokens: 20, + total_tokens: 10 + 20, + }, + metering_coins_prompt: 300, + metering_coins_generated: 150, + metering_coins_cache_creation: 20, + metering_coins_cache_read: 10, + extra: { + metering_coins_prompt: 100, + metering_coins_generated: 50, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + 
}, + } satisfies MessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalCostMeteringForMessages(messages); + + expect(result).toEqual({ + metering_coins_prompt: 300, + metering_coins_generated: 150, + metering_coins_cache_creation: 20, + metering_coins_cache_read: 10, + }); + }); + + it("should aggregate metering from multiple messages", () => { + const messages = [ + { + role: "assistant", + content: "First", + usage: { + completion_tokens: 10, + prompt_tokens: 20, + total_tokens: 10 + 20, + }, + extra: { + metering_coins_prompt: 100, + metering_coins_generated: 50, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + }, + } satisfies MessageWithExtra, + { role: "user", content: "Follow up" }, + { + role: "assistant", + content: "Second", + usage: { + completion_tokens: 15, + prompt_tokens: 25, + total_tokens: 15 + 25, + }, + extra: { + metering_coins_prompt: 150, + metering_coins_generated: 75, + metering_coins_cache_creation: 10, + metering_coins_cache_read: 5, + }, + } satisfies MessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalCostMeteringForMessages(messages); + + expect(result).toEqual({ + metering_coins_prompt: 250, + metering_coins_generated: 125, + metering_coins_cache_creation: 10, + metering_coins_cache_read: 5, + }); + }); + + it("should return null when no messages have metering data", () => { + const messages = [ + { role: "user", content: "Hello" }, + { role: "assistant", content: "Hi" }, + ] as unknown as ChatMessages; + + const result = getTotalCostMeteringForMessages(messages); + + expect(result).toBeNull(); + }); + + it("should return null for empty messages array", () => { + const result = getTotalCostMeteringForMessages([]); + expect(result).toBeNull(); + }); + + it("should extract metering from tool messages (subagent results)", () => { + const messages = [ + { role: "user", content: "Hello" }, + { + role: "assistant", + content: "Let me delegate this", + tool_calls: [ + { + id: "call_123", + function: { name: "subagent", arguments: "{}" }, + index: 0, + }, + ], + } satisfies MessageWithExtra, + { + role: "tool", + content: "Subagent result", + tool_call_id: "call_123", + extra: { + metering_coins_prompt: 500, + metering_coins_generated: 250, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + }, + } satisfies ToolMessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalCostMeteringForMessages(messages); + + expect(result).toEqual({ + metering_coins_prompt: 500, + metering_coins_generated: 250, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + }); + }); + + it("should aggregate metering from both assistant and tool messages", () => { + const messages = [ + { + role: "assistant", + content: "First response", + extra: { + metering_coins_prompt: 100, + metering_coins_generated: 50, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + }, + } satisfies MessageWithExtra, + { + role: "tool", + content: "Tool result", + tool_call_id: "call_123", + extra: { + metering_coins_prompt: 200, + metering_coins_generated: 100, + metering_coins_cache_creation: 5, + metering_coins_cache_read: 3, + }, + } satisfies ToolMessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalCostMeteringForMessages(messages); + + expect(result).toEqual({ + metering_coins_prompt: 300, + metering_coins_generated: 150, + metering_coins_cache_creation: 5, + metering_coins_cache_read: 3, + }); + }); + + it("should handle string numbers from 
providers", () => { + const messages = [ + { + role: "assistant", + content: "Test", + extra: { + metering_coins_prompt: "100.5", + metering_coins_generated: "50.25", + metering_coins_cache_creation: "0", + metering_coins_cache_read: "0", + }, + } satisfies MessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalCostMeteringForMessages(messages); + + expect(result).toEqual({ + metering_coins_prompt: 100.5, + metering_coins_generated: 50.25, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + }); + }); + }); + + describe("getTotalTokenMeteringForMessages", () => { + it("should extract token metering from message.extra", () => { + const messages = [ + { + role: "assistant", + content: "Test", + usage: { + completion_tokens: 10, + prompt_tokens: 20, + total_tokens: 10 + 20, + }, + extra: { + metering_coins_prompt: 100, + metering_coins_generated: 50, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + metering_prompt_tokens_n: 1000, + metering_generated_tokens_n: 500, + metering_cache_creation_tokens_n: 0, + metering_cache_read_tokens_n: 0, + }, + } satisfies MessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalTokenMeteringForMessages(messages); + + expect(result).toEqual({ + metering_prompt_tokens_n: 1000, + metering_generated_tokens_n: 500, + metering_cache_creation_tokens_n: 0, + metering_cache_read_tokens_n: 0, + }); + }); + + it("should return null when no messages have token metering", () => { + const messages = [ + { + role: "assistant", + content: "Test", + extra: { + metering_coins_prompt: 100, + metering_coins_generated: 50, + metering_coins_cache_creation: 0, + metering_coins_cache_read: 0, + }, + } satisfies MessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalTokenMeteringForMessages(messages); + + expect(result).toBeNull(); + }); + + it("should extract token metering from tool messages", () => { + const messages = [ + { + role: "tool", + content: "Subagent result", + tool_call_id: "call_123", + extra: { + metering_prompt_tokens_n: 2000, + metering_generated_tokens_n: 1000, + metering_cache_creation_tokens_n: 100, + metering_cache_read_tokens_n: 50, + }, + } satisfies ToolMessageWithExtra, + ] as unknown as ChatMessages; + + const result = getTotalTokenMeteringForMessages(messages); + + expect(result).toEqual({ + metering_prompt_tokens_n: 2000, + metering_generated_tokens_n: 1000, + metering_cache_creation_tokens_n: 100, + metering_cache_read_tokens_n: 50, + }); + }); + }); +}); diff --git a/refact-agent/gui/src/utils/__tests__/threadStorage.test.ts b/refact-agent/gui/src/utils/__tests__/threadStorage.test.ts new file mode 100644 index 000000000..4be5cfecc --- /dev/null +++ b/refact-agent/gui/src/utils/__tests__/threadStorage.test.ts @@ -0,0 +1,140 @@ +import { describe, it, expect, beforeEach } from "vitest"; +import { + saveLastThreadParams, + getLastThreadParams, + saveModeParams, + getModeParams, + saveDraftMessage, + getDraftMessage, + clearDraftMessage, + clearAllDraftMessages, + pruneStaleDraftMessages, +} from "../threadStorage"; + +describe("threadStorage", () => { + beforeEach(() => { + localStorage.clear(); + }); + + describe("thread parameters", () => { + it("should save and retrieve thread params per mode", () => { + const params = { + model: "gpt-4", + boost_reasoning: true, + mode: "agent" as const, + }; + + saveLastThreadParams(params); + const retrieved = getLastThreadParams("agent"); + + expect(retrieved.model).toBe("gpt-4"); + 
expect(retrieved.boost_reasoning).toBe(true); + expect(retrieved.mode).toBe("agent"); + }); + + it("should keep params separate per mode", () => { + saveModeParams("agent", { model: "gpt-4", temperature: 0.7 }); + saveModeParams("explore", { model: "claude-3", temperature: 0.5 }); + + expect(getModeParams("agent").model).toBe("gpt-4"); + expect(getModeParams("agent").temperature).toBe(0.7); + expect(getModeParams("explore").model).toBe("claude-3"); + expect(getModeParams("explore").temperature).toBe(0.5); + }); + + it("should merge with existing params for same mode", () => { + saveModeParams("agent", { model: "gpt-4" }); + saveModeParams("agent", { boost_reasoning: true }); + + const retrieved = getModeParams("agent"); + expect(retrieved).toEqual({ + model: "gpt-4", + boost_reasoning: true, + }); + }); + }); + + describe("draft messages", () => { + it("should save and retrieve draft message", () => { + const threadId = "thread-123"; + const content = "Hello, world!"; + + saveDraftMessage(threadId, content); + const retrieved = getDraftMessage(threadId); + + expect(retrieved).toBe(content); + }); + + it("should retrieve draft immediately after saving (simulating page refresh)", () => { + const threadId = "thread-456"; + const content = "Draft before refresh"; + + saveDraftMessage(threadId, content); + + const retrievedAfterRefresh = getDraftMessage(threadId); + expect(retrievedAfterRefresh).toBe(content); + }); + + it("should return empty string for non-existent draft", () => { + const retrieved = getDraftMessage("non-existent"); + expect(retrieved).toBe(""); + }); + + it("should clear draft when content is empty", () => { + saveDraftMessage("thread-123", "Some content"); + saveDraftMessage("thread-123", ""); + + const retrieved = getDraftMessage("thread-123"); + expect(retrieved).toBe(""); + }); + + it("should clear specific draft message", () => { + saveDraftMessage("thread-1", "Content 1"); + saveDraftMessage("thread-2", "Content 2"); + + clearDraftMessage("thread-1"); + + expect(getDraftMessage("thread-1")).toBe(""); + expect(getDraftMessage("thread-2")).toBe("Content 2"); + }); + + it("should clear all draft messages", () => { + saveDraftMessage("thread-1", "Content 1"); + saveDraftMessage("thread-2", "Content 2"); + + clearAllDraftMessages(); + + expect(getDraftMessage("thread-1")).toBe(""); + expect(getDraftMessage("thread-2")).toBe(""); + }); + + it("should prune stale drafts", () => { + const now = Date.now(); + const eightDaysAgo = now - 8 * 24 * 60 * 60 * 1000; + + localStorage.setItem( + "refact_draft_messages", + JSON.stringify({ + "thread-old": { content: "Old content", timestamp: eightDaysAgo }, + "thread-new": { content: "New content", timestamp: now }, + }), + ); + + pruneStaleDraftMessages(); + + expect(getDraftMessage("thread-old")).toBe(""); + expect(getDraftMessage("thread-new")).toBe("New content"); + }); + + it("should limit draft messages to MAX_DRAFT_MESSAGES", () => { + for (let i = 0; i < 60; i++) { + saveDraftMessage(`thread-${i}`, `Content ${i}`); + } + + const stored = JSON.parse( + localStorage.getItem("refact_draft_messages") ?? 
"{}", + ) as Record<string, unknown>; + expect(Object.keys(stored).length).toBeLessThanOrEqual(50); + }); + }); +}); diff --git a/refact-agent/gui/src/utils/atCommands/displayUtils.ts b/refact-agent/gui/src/utils/atCommands/displayUtils.ts new file mode 100644 index 000000000..1b0b5f224 --- /dev/null +++ b/refact-agent/gui/src/utils/atCommands/displayUtils.ts @@ -0,0 +1,107 @@ +import type { AtCommandType, AtCommandToken, ChipDisplayInfo } from "./types"; +import { formatLineRange } from "./parseAtCommands"; + +const ICONS: Record<AtCommandType, string> = { + file: "📎", + web: "🌐", + tree: "🌲", + search: "🔍", + definition: "📍", + "knowledge-load": "🧠", + references: "📍", + help: "❓", +}; + +type CommandMeta = { + argRequired: boolean; + clickable: boolean; +}; + +const COMMAND_META: Record<AtCommandType, CommandMeta> = { + file: { argRequired: true, clickable: true }, + web: { argRequired: true, clickable: true }, + tree: { argRequired: false, clickable: false }, + search: { argRequired: true, clickable: false }, + definition: { argRequired: true, clickable: false }, + "knowledge-load": { argRequired: true, clickable: false }, + references: { argRequired: true, clickable: false }, + help: { argRequired: false, clickable: false }, +}; + +export function isCommandDisabled( + token: AtCommandToken, + hostDisabled: boolean, +): boolean { + const meta = COMMAND_META[token.type]; + if (hostDisabled) return true; + if (meta.argRequired && !token.arg) return true; + return false; +} + +export function getFilename(path: string): string { + const parts = path.split(/[/\\]/); + return parts[parts.length - 1] || path; +} + +export function getDomain(url: string): string { + try { + const parsed = new URL(url.startsWith("http") ? url : `https://${url}`); + return parsed.hostname.replace(/^www\./, ""); + } catch { + return url; + } +} + +export function getDisplayLabel( + token: AtCommandToken, + allTokens?: AtCommandToken[], +): string { + const { type, arg, lineRange } = token; + + if (!arg) { + return token.command; + } + + switch (type) { + case "file": { + let filename = getFilename(arg); + if (allTokens) { + const sameNameTokens = allTokens.filter( + (t) => t.type === "file" && t.arg && getFilename(t.arg) === filename, + ); + if (sameNameTokens.length > 1) { + const parts = arg.split(/[/\\]/); + filename = parts.slice(-2).join("/"); + } + } + return lineRange ? `${filename}${formatLineRange(lineRange)}` : filename; + } + case "web": + return getDomain(arg); + case "tree": + return arg ? getFilename(arg) : "tree"; + case "search": + case "definition": + case "references": + case "knowledge-load": + return arg.length > 20 ? arg.slice(0, 20) + "…" : arg; + default: + return token.command; + } +} + +export function tokenToChipInfo( + token: AtCommandToken, + hostDisabled: boolean, + allTokens?: AtCommandToken[], +): ChipDisplayInfo { + return { + type: token.type, + icon: ICONS[token.type], + label: getDisplayLabel(token, allTokens), + fullPath: token.arg, + lineRange: token.lineRange ? formatLineRange(token.lineRange) : undefined, + url: token.type === "web" ? 
token.arg : undefined, + disabled: isCommandDisabled(token, hostDisabled), + }; +} diff --git a/refact-agent/gui/src/utils/atCommands/index.ts b/refact-agent/gui/src/utils/atCommands/index.ts new file mode 100644 index 000000000..8963c7391 --- /dev/null +++ b/refact-agent/gui/src/utils/atCommands/index.ts @@ -0,0 +1,3 @@ +export * from "./types"; +export * from "./parseAtCommands"; +export * from "./displayUtils"; diff --git a/refact-agent/gui/src/utils/atCommands/parseAtCommands.test.ts b/refact-agent/gui/src/utils/atCommands/parseAtCommands.test.ts new file mode 100644 index 000000000..59a3e22d2 --- /dev/null +++ b/refact-agent/gui/src/utils/atCommands/parseAtCommands.test.ts @@ -0,0 +1,146 @@ +import { describe, it, expect } from "vitest"; +import { + parseLine, + parseLines, + parseLineRange, + formatLineRange, +} from "./parseAtCommands"; + +describe("parseLineRange", () => { + it("parses single line", () => { + const result = parseLineRange("file.ts:10"); + expect(result.path).toBe("file.ts"); + expect(result.lineRange).toEqual({ line1: 10, kind: "single" }); + }); + + it("parses range", () => { + const result = parseLineRange("file.ts:10-20"); + expect(result.path).toBe("file.ts"); + expect(result.lineRange).toEqual({ line1: 10, line2: 20, kind: "range" }); + }); + + it("parses to-end range", () => { + const result = parseLineRange("file.ts:10-"); + expect(result.path).toBe("file.ts"); + expect(result.lineRange).toEqual({ line1: 10, kind: "to-end" }); + }); + + it("parses from-start range", () => { + const result = parseLineRange("file.ts:-20"); + expect(result.path).toBe("file.ts"); + expect(result.lineRange).toEqual({ + line1: 1, + line2: 20, + kind: "from-start", + }); + }); + + it("returns path only when no range", () => { + const result = parseLineRange("file.ts"); + expect(result.path).toBe("file.ts"); + expect(result.lineRange).toBeUndefined(); + }); +}); + +describe("formatLineRange", () => { + it("formats single line", () => { + expect(formatLineRange({ line1: 10, kind: "single" })).toBe(":10"); + }); + + it("formats range", () => { + expect(formatLineRange({ line1: 10, line2: 20, kind: "range" })).toBe( + ":10-20", + ); + }); +}); + +describe("parseLine", () => { + it("parses simple @file command", () => { + const result = parseLine("@file src/main.rs"); + expect(result.tokens).toHaveLength(1); + expect(result.tokens[0]).toMatchObject({ + kind: "at", + type: "file", + arg: "src/main.rs", + }); + }); + + it("parses @file with line range", () => { + const result = parseLine("@file src/main.rs:10-20"); + expect(result.tokens[0]).toMatchObject({ + kind: "at", + type: "file", + arg: "src/main.rs", + lineRange: { line1: 10, line2: 20, kind: "range" }, + }); + }); + + it("parses @web command", () => { + const result = parseLine("@web https://example.com"); + expect(result.tokens[0]).toMatchObject({ + kind: "at", + type: "web", + arg: "https://example.com", + }); + }); + + it("parses mixed text and commands", () => { + const result = parseLine("check @file main.rs @web docs.rs"); + const atTokens = result.tokens.filter((t) => t.kind === "at"); + expect(atTokens).toHaveLength(2); + expect(atTokens[0]).toMatchObject({ + kind: "at", + type: "file", + arg: "main.rs", + }); + expect(atTokens[1]).toMatchObject({ + kind: "at", + type: "web", + arg: "docs.rs", + }); + }); + + it("handles trailing punctuation", () => { + const result = parseLine("@file main.rs, @web example.com!"); + expect(result.tokens[0]).toMatchObject({ kind: "at", arg: "main.rs" }); + 
expect(result.tokens[2]).toMatchObject({ kind: "at", arg: "example.com" }); + }); + + it("parses @tree without args", () => { + const result = parseLine("@tree"); + expect(result.tokens[0]).toMatchObject({ + kind: "at", + type: "tree", + arg: undefined, + }); + }); + + it("parses @search with multiple words", () => { + const result = parseLine("@search auth bug fix"); + expect(result.tokens[0]).toMatchObject({ + kind: "at", + type: "search", + arg: "auth bug fix", + }); + }); +}); + +describe("parseLines", () => { + it("skips parsing inside code fences", () => { + const text = "before\n```\n@file inside.rs\n```\nafter @file outside.rs"; + const result = parseLines(text); + + expect(result[2].tokens[0]).toMatchObject({ + kind: "text", + text: "@file inside.rs", + }); + expect(result[4].tokens[1]).toMatchObject({ kind: "at", type: "file" }); + }); + + it("handles multiple code fences", () => { + const text = "```\ncode1\n```\n@file test.rs\n```\ncode2\n```"; + const result = parseLines(text); + + expect(result[3].tokens[0]).toMatchObject({ kind: "at", type: "file" }); + }); +}); diff --git a/refact-agent/gui/src/utils/atCommands/parseAtCommands.ts b/refact-agent/gui/src/utils/atCommands/parseAtCommands.ts new file mode 100644 index 000000000..f14f5f844 --- /dev/null +++ b/refact-agent/gui/src/utils/atCommands/parseAtCommands.ts @@ -0,0 +1,212 @@ +import type { + AtCommandType, + AtCommandToken, + Token, + LineRange, + ParsedLine, +} from "./types"; + +const AT_COMMANDS: AtCommandType[] = [ + "file", + "web", + "tree", + "search", + "definition", + "knowledge-load", + "references", + "help", +]; + +const TRAILING_PUNCTUATION = /[!.,?]+$/; + +export function parseLineRange(arg: string): { + path: string; + lineRange?: LineRange; +} { + const rangeMatch = arg.match(/:(\d+)?-(\d+)?$/); + if (rangeMatch) { + const path = arg.replace(/:(\d+)?-(\d+)?$/, ""); + const line1 = rangeMatch[1] ? parseInt(rangeMatch[1], 10) : undefined; + const line2 = rangeMatch[2] ? parseInt(rangeMatch[2], 10) : undefined; + + if (line1 !== undefined && line2 !== undefined) { + return { path, lineRange: { line1, line2, kind: "range" } }; + } else if (line1 !== undefined) { + return { path, lineRange: { line1, kind: "to-end" } }; + } else if (line2 !== undefined) { + return { path, lineRange: { line1: 1, line2, kind: "from-start" } }; + } + } + + const singleMatch = arg.match(/:(\d+)$/); + if (singleMatch) { + const path = arg.replace(/:(\d+)$/, ""); + return { + path, + lineRange: { line1: parseInt(singleMatch[1], 10), kind: "single" }, + }; + } + + return { path: arg }; +} + +export function formatLineRange(lineRange: LineRange): string { + switch (lineRange.kind) { + case "single": + return `:${lineRange.line1}`; + case "range": + return `:${lineRange.line1}-${lineRange.line2}`; + case "to-end": + return `:${lineRange.line1}-`; + case "from-start": + return `:-${lineRange.line2}`; + } +} + +function isAtCommand(word: string): AtCommandType | null { + if (!word.startsWith("@")) return null; + const cmd = word.slice(1).toLowerCase(); + return AT_COMMANDS.find((c) => c === cmd) ?? 
null; +} + +function parseWords(line: string): [string, number, number][] { + const results: [string, number, number][] = []; + const regex = /@?\S+/g; + let match; + + while ((match = regex.exec(line)) !== null) { + const trimmed = match[0].replace(TRAILING_PUNCTUATION, ""); + if (trimmed.length > 0) { + results.push([trimmed, match.index, match.index + trimmed.length]); + } + } + + return results; +} + +export function parseLine(line: string): ParsedLine { + const tokens: Token[] = []; + const words = parseWords(line); + + let lastEnd = 0; + let i = 0; + + while (i < words.length) { + const [word, startIdx, endIdx] = words[i]; + const cmdType = isAtCommand(word); + + if (cmdType) { + if (startIdx > lastEnd) { + tokens.push({ + kind: "text", + text: line.slice(lastEnd, startIdx), + startIndex: lastEnd, + endIndex: startIdx, + }); + } + + const args: string[] = []; + let argEndIdx = endIdx; + let j = i + 1; + + while (j < words.length) { + const [nextWord, , nextEnd] = words[j]; + if (isAtCommand(nextWord)) break; + args.push(nextWord); + argEndIdx = nextEnd; + j++; + } + + const rawText = line.slice(startIdx, argEndIdx); + const arg = args.length > 0 ? args.join(" ") : undefined; + + const token: AtCommandToken = { + kind: "at", + type: cmdType, + raw: rawText, + command: word, + arg, + startIndex: startIdx, + endIndex: argEndIdx, + }; + + if (cmdType === "file" && arg) { + const { path, lineRange } = parseLineRange(arg); + token.arg = path; + token.lineRange = lineRange; + } + + tokens.push(token); + lastEnd = argEndIdx; + i = j; + } else { + i++; + } + } + + if (lastEnd < line.length) { + tokens.push({ + kind: "text", + text: line.slice(lastEnd), + startIndex: lastEnd, + endIndex: line.length, + }); + } + + if (tokens.length === 0 && line.length > 0) { + tokens.push({ + kind: "text", + text: line, + startIndex: 0, + endIndex: line.length, + }); + } + + return { tokens, originalText: line }; +} + +export function parseLines(text: string): ParsedLine[] { + const lines = text.split("\n"); + const results: ParsedLine[] = []; + let inCodeFence = false; + + for (const line of lines) { + if (line.trimStart().startsWith("```")) { + inCodeFence = !inCodeFence; + results.push({ + tokens: [ + { kind: "text", text: line, startIndex: 0, endIndex: line.length }, + ], + originalText: line, + }); + continue; + } + + if (inCodeFence) { + results.push({ + tokens: [ + { kind: "text", text: line, startIndex: 0, endIndex: line.length }, + ], + originalText: line, + }); + } else { + results.push(parseLine(line)); + } + } + + return results; +} + +export function hasAtCommands(parsedLines: ParsedLine[]): boolean { + return parsedLines.some((line) => + line.tokens.some((token) => token.kind === "at"), + ); +} + +export function getAtCommandTokens( + parsedLines: ParsedLine[], +): AtCommandToken[] { + return parsedLines.flatMap((line) => + line.tokens.filter((token): token is AtCommandToken => token.kind === "at"), + ); +} diff --git a/refact-agent/gui/src/utils/atCommands/types.ts b/refact-agent/gui/src/utils/atCommands/types.ts new file mode 100644 index 000000000..00e96ef33 --- /dev/null +++ b/refact-agent/gui/src/utils/atCommands/types.ts @@ -0,0 +1,51 @@ +export type AtCommandType = + | "file" + | "web" + | "tree" + | "search" + | "definition" + | "knowledge-load" + | "references" + | "help"; + +export type LineRange = { + line1: number; + line2?: number; + kind: "single" | "range" | "from-start" | "to-end"; +}; + +export type AtCommandToken = { + kind: "at"; + type: AtCommandType; + raw: string; + 
command: string; + arg?: string; + lineRange?: LineRange; + startIndex: number; + endIndex: number; +}; + +export type TextToken = { + kind: "text"; + text: string; + startIndex: number; + endIndex: number; +}; + +export type Token = AtCommandToken | TextToken; + +export type ParsedLine = { + tokens: Token[]; + originalText: string; +}; + +export type ChipDisplayInfo = { + type: AtCommandType; + icon: string; + label: string; + fullPath?: string; + lineRange?: string; + url?: string; + disabled: boolean; + onClick?: () => void; +}; diff --git a/refact-agent/gui/src/utils/backoff.ts b/refact-agent/gui/src/utils/backoff.ts new file mode 100644 index 000000000..abfdccdf6 --- /dev/null +++ b/refact-agent/gui/src/utils/backoff.ts @@ -0,0 +1,25 @@ +export type BackoffOptions = { + baseDelay?: number; + maxDelay?: number; + multiplier?: number; + jitter?: number; +}; + +export function calculateBackoff( + retryCount: number, + options: BackoffOptions = {}, +): number { + const { + baseDelay = 1000, + maxDelay = 30000, + multiplier = 2, + jitter = 0.1, + } = options; + + const delay = Math.min( + baseDelay * Math.pow(multiplier, retryCount), + maxDelay, + ); + const jitterAmount = delay * jitter * (Math.random() * 2 - 1); + return Math.max(0, Math.round(delay + jitterAmount)); +} diff --git a/refact-agent/gui/src/utils/copyChatHistoryToClipboard.ts b/refact-agent/gui/src/utils/copyChatHistoryToClipboard.ts index b1157dcb9..e4b2b5378 100644 --- a/refact-agent/gui/src/utils/copyChatHistoryToClipboard.ts +++ b/refact-agent/gui/src/utils/copyChatHistoryToClipboard.ts @@ -1,8 +1,7 @@ -import type { RootState } from "../app/store"; import { fallbackCopying } from "./fallbackCopying"; export const copyChatHistoryToClipboard = async ( - chatThread: RootState["history"]["thread"], + chatThread: Record<string, unknown>, ): Promise<void> => { const jsonString = JSON.stringify(chatThread, null, 2); diff --git a/refact-agent/gui/src/utils/enrichModels.ts b/refact-agent/gui/src/utils/enrichModels.ts index ba96ff1b0..0e0482640 100644 --- a/refact-agent/gui/src/utils/enrichModels.ts +++ b/refact-agent/gui/src/utils/enrichModels.ts @@ -47,8 +47,9 @@ function extractCapabilities( supportsMultimodality: capsModel.supports_multimodality, supportsClicks: capsModel.supports_clicks, supportsAgent: capsModel.supports_agent, - supportsReasoning: capsModel.supports_reasoning, - supportsBoostReasoning: capsModel.supports_boost_reasoning, + reasoningEffortOptions: capsModel.reasoning_effort_options, + supportsThinkingBudget: capsModel.supports_thinking_budget, + supportsAdaptiveThinkingBudget: capsModel.supports_adaptive_thinking_budget, }; } diff --git a/refact-agent/gui/src/utils/getMetering.ts b/refact-agent/gui/src/utils/getMetering.ts index 9426529f9..638a9ec56 100644 --- a/refact-agent/gui/src/utils/getMetering.ts +++ b/refact-agent/gui/src/utils/getMetering.ts @@ -1,17 +1,62 @@ -import { Usage } from "../services/refact/chat"; import { - AssistantMessage, ChatMessage, ChatMessages, - isAssistantMessage, + MeteringUsd, } from "../services/refact/types"; +import type { Usage } from "../services/refact/chat"; + +type MessageWithExtra = ChatMessage & { + extra?: Record<string, unknown>; +}; + +function parseNumberish(v: unknown): number | undefined { + if (typeof v === "number" && Number.isFinite(v)) return v; + if (typeof v === "string") { + const n = Number(v); + if (Number.isFinite(n)) return n; + } + return undefined; +} + +function getMeteringValue( + message: MessageWithExtra, + field: string, +): number | undefined 
{ + const directValue = (message as unknown as Record<string, unknown>)[field]; + const directNum = parseNumberish(directValue); + if (directNum !== undefined) return directNum; + + const extraNum = parseNumberish(message.extra?.[field]); + if (extraNum !== undefined) return extraNum; + + return undefined; +} + +function hasCoinMetering(message: ChatMessage): boolean { + const m = message as MessageWithExtra; + return ( + getMeteringValue(m, "metering_coins_prompt") !== undefined || + getMeteringValue(m, "metering_coins_generated") !== undefined || + getMeteringValue(m, "metering_coins_cache_creation") !== undefined || + getMeteringValue(m, "metering_coins_cache_read") !== undefined + ); +} + +function hasTokenMetering(message: ChatMessage): boolean { + const m = message as MessageWithExtra; + return ( + getMeteringValue(m, "metering_prompt_tokens_n") !== undefined || + getMeteringValue(m, "metering_generated_tokens_n") !== undefined || + getMeteringValue(m, "metering_cache_creation_tokens_n") !== undefined || + getMeteringValue(m, "metering_cache_read_tokens_n") !== undefined + ); +} -// TODO: cap cost should be in the messages:/ export function getTotalCostMeteringForMessages(messages: ChatMessages) { - const assistantMessages = messages.filter(hasUsageAndPrice); - if (assistantMessages.length === 0) return null; + const meteringMessages = messages.filter(hasCoinMetering); + if (meteringMessages.length === 0) return null; - return assistantMessages.reduce<{ + return meteringMessages.reduce<{ metering_coins_prompt: number; metering_coins_generated: number; metering_coins_cache_creation: number; @@ -20,14 +65,17 @@ export function getTotalCostMeteringForMessages(messages: ChatMessages) { (acc, message) => { return { metering_coins_prompt: - acc.metering_coins_prompt + message.metering_coins_prompt, + acc.metering_coins_prompt + + (getMeteringValue(message, "metering_coins_prompt") ?? 0), metering_coins_generated: - acc.metering_coins_generated + message.metering_coins_generated, + acc.metering_coins_generated + + (getMeteringValue(message, "metering_coins_generated") ?? 0), metering_coins_cache_creation: acc.metering_coins_cache_creation + - message.metering_coins_cache_creation, + (getMeteringValue(message, "metering_coins_cache_creation") ?? 0), metering_coins_cache_read: - acc.metering_coins_cache_read + message.metering_coins_cache_read, + acc.metering_coins_cache_read + + (getMeteringValue(message, "metering_coins_cache_read") ?? 0), }; }, { @@ -40,32 +88,29 @@ export function getTotalCostMeteringForMessages(messages: ChatMessages) { } export function getTotalTokenMeteringForMessages(messages: ChatMessages) { - const assistantMessages = messages.filter(hasUsageAndPrice); - if (assistantMessages.length === 0) return null; + const meteringMessages = messages.filter(hasTokenMetering); + if (meteringMessages.length === 0) return null; - return assistantMessages.reduce<{ + return meteringMessages.reduce<{ metering_prompt_tokens_n: number; metering_generated_tokens_n: number; metering_cache_creation_tokens_n: number; metering_cache_read_tokens_n: number; }>( (acc, message) => { - const { - metering_prompt_tokens_n = 0, - metering_generated_tokens_n = 0, - metering_cache_read_tokens_n = 0, - metering_cache_creation_tokens_n = 0, - } = message; return { metering_prompt_tokens_n: - acc.metering_prompt_tokens_n + metering_prompt_tokens_n, + acc.metering_prompt_tokens_n + + (getMeteringValue(message, "metering_prompt_tokens_n") ?? 
0), metering_generated_tokens_n: - acc.metering_generated_tokens_n + metering_generated_tokens_n, + acc.metering_generated_tokens_n + + (getMeteringValue(message, "metering_generated_tokens_n") ?? 0), metering_cache_creation_tokens_n: acc.metering_cache_creation_tokens_n + - metering_cache_creation_tokens_n, + (getMeteringValue(message, "metering_cache_creation_tokens_n") ?? 0), metering_cache_read_tokens_n: - acc.metering_cache_read_tokens_n + metering_cache_read_tokens_n, + acc.metering_cache_read_tokens_n + + (getMeteringValue(message, "metering_cache_read_tokens_n") ?? 0), }; }, { @@ -76,39 +121,43 @@ export function getTotalTokenMeteringForMessages(messages: ChatMessages) { }, ); } -function hasUsageAndPrice(message: ChatMessage): message is AssistantMessage & { - usage: Usage & { - completion_tokens: number; - prompt_tokens: number; - cache_creation_input_tokens?: number; - cache_read_input_tokens?: number; - }; - metering_coins_prompt: number; - metering_coins_generated: number; - metering_coins_cache_creation: number; - metering_coins_cache_read: number; - - metering_prompt_tokens_n?: number; - metering_generated_tokens_n?: number; - metering_cache_creation_tokens_n?: number; - metering_cache_read_tokens_n?: number; -} { - if (!isAssistantMessage(message)) return false; - if (!("usage" in message)) return false; - if (!message.usage) return false; - if (typeof message.usage.completion_tokens !== "number") return false; - if (typeof message.usage.prompt_tokens !== "number") return false; - if (typeof message.metering_coins_prompt !== "number") return false; - if (typeof message.metering_coins_prompt !== "number") return false; - if (typeof message.metering_coins_cache_creation !== "number") return false; - if (typeof message.metering_coins_cache_read !== "number") return false; - - // if (typeof message.metering_prompt_tokens_n !== "number") return false; - // if (typeof message.metering_generated_tokens_n !== "number") return false; - // if (typeof message.metering_cache_creation_tokens_n !== "number") { - // return false; - // } - // if (typeof message.metering_cache_read_tokens_n !== "number") return false; - - return true; + +type MessageWithUsage = ChatMessage & { usage?: Usage }; + +function hasUsdMetering(message: ChatMessage): boolean { + const m = message as MessageWithUsage; + return m.usage?.metering_usd !== undefined; +} + +export function getTotalUsdMeteringForMessages( + messages: ChatMessages, +): MeteringUsd | null { + const meteringMessages = messages.filter(hasUsdMetering); + if (meteringMessages.length === 0) return null; + + return meteringMessages.reduce<MeteringUsd>( + (acc, message) => { + const usd = (message as MessageWithUsage).usage?.metering_usd; + if (!usd) return acc; + return { + prompt_usd: acc.prompt_usd + usd.prompt_usd, + generated_usd: acc.generated_usd + usd.generated_usd, + cache_read_usd: + (acc.cache_read_usd ?? 0) + (usd.cache_read_usd ?? 0) || undefined, + cache_creation_usd: + (acc.cache_creation_usd ?? 0) + (usd.cache_creation_usd ?? 
0) || + undefined, + total_usd: acc.total_usd + usd.total_usd, + }; + }, + { prompt_usd: 0, generated_usd: 0, total_usd: 0 }, + ); +} + +export function formatUsd(value: number | undefined): string { + if (value === undefined || !Number.isFinite(value)) return "–"; + if (value >= 0.01) return `$${value.toFixed(2)}`; + if (value >= 0.001) return `$${value.toFixed(3)}`; + if (value > 0) return `$${value.toFixed(4)}`; + return "$0.00"; } diff --git a/refact-agent/gui/src/utils/modeColors.ts b/refact-agent/gui/src/utils/modeColors.ts new file mode 100644 index 000000000..fde0fa96e --- /dev/null +++ b/refact-agent/gui/src/utils/modeColors.ts @@ -0,0 +1,41 @@ +export const MODE_BADGE_COLORS = [ + "gray", + "gold", + "bronze", + "brown", + "yellow", + "amber", + "orange", + "tomato", + "red", + "ruby", + "crimson", + "pink", + "plum", + "purple", + "violet", + "iris", + "indigo", + "blue", + "cyan", + "teal", + "jade", + "green", + "grass", + "lime", + "mint", + "sky", +] as const; + +export type ModeBadgeColor = (typeof MODE_BADGE_COLORS)[number]; + +export function getModeColor(modeId: string | undefined): ModeBadgeColor { + if (!modeId) return "blue"; + let hash = 70; + for (let i = 0; i < modeId.length; i++) { + hash ^= modeId.charCodeAt(i); + hash = Math.imul(hash, 16777619); + } + const index = Math.abs(hash) % MODE_BADGE_COLORS.length; + return MODE_BADGE_COLORS[index]; +} diff --git a/refact-agent/gui/src/utils/sessionStatus.ts b/refact-agent/gui/src/utils/sessionStatus.ts new file mode 100644 index 000000000..1a531e6b1 --- /dev/null +++ b/refact-agent/gui/src/utils/sessionStatus.ts @@ -0,0 +1,52 @@ +import type { StatusDotState } from "../components/StatusDot"; + +export type SessionState = + | "idle" + | "generating" + | "executing_tools" + | "paused" + | "waiting_ide" + | "waiting_user_input" + | "completed" + | "error"; + +export function getStatusFromSessionState( + sessionState?: string | null, +): StatusDotState { + if (sessionState === "generating" || sessionState === "executing_tools") { + return "in_progress"; + } + if ( + sessionState === "paused" || + sessionState === "waiting_ide" || + sessionState === "waiting_user_input" + ) { + return "needs_attention"; + } + if (sessionState === "completed") { + return "completed"; + } + if (sessionState === "error") { + return "error"; + } + return "idle"; +} + +export function getStatusTooltip(sessionState?: string | null): string { + if (sessionState === "generating" || sessionState === "executing_tools") { + return "In progress..."; + } + if (sessionState === "waiting_user_input") { + return "Waiting for your answer"; + } + if (sessionState === "paused" || sessionState === "waiting_ide") { + return "Needs your attention"; + } + if (sessionState === "completed") { + return "Task completed"; + } + if (sessionState === "error") { + return "An error occurred"; + } + return "Idle"; +} diff --git a/refact-agent/gui/src/utils/test-setup.ts b/refact-agent/gui/src/utils/test-setup.ts index 4bbf02fbe..c93d65f85 100644 --- a/refact-agent/gui/src/utils/test-setup.ts +++ b/refact-agent/gui/src/utils/test-setup.ts @@ -8,19 +8,30 @@ import MatchMediaMock from "vitest-matchmedia-mock"; import React from "react"; const matchMediaMock = new MatchMediaMock(); +(globalThis as Record<string, unknown>).__REFACT_LSP_PORT__ = 8001; + beforeAll(() => { stubResizeObserver(); stubIntersectionObserver(); Element.prototype.scrollIntoView = vi.fn(); // Mock localStorage for tests + const storage = new Map<string, string>(); const localStorageMock: Storage = { - 
getItem: vi.fn(() => null), - setItem: vi.fn(), - removeItem: vi.fn(), - clear: vi.fn(), - key: vi.fn(() => null), - length: 0, + getItem: (key: string) => storage.get(key) ?? null, + setItem: (key: string, value: string) => { + storage.set(key, value); + }, + removeItem: (key: string) => { + storage.delete(key); + }, + clear: () => { + storage.clear(); + }, + key: (index: number) => Array.from(storage.keys())[index] ?? null, + get length() { + return storage.size; + }, }; global.localStorage = localStorageMock; }); diff --git a/refact-agent/gui/src/utils/test-utils.tsx b/refact-agent/gui/src/utils/test-utils.tsx index ba3176a6a..935098321 100644 --- a/refact-agent/gui/src/utils/test-utils.tsx +++ b/refact-agent/gui/src/utils/test-utils.tsx @@ -6,8 +6,61 @@ import userEvent from "@testing-library/user-event"; import { Theme } from "@radix-ui/themes"; import { Provider } from "react-redux"; import { AppStore, RootState, setUpStore } from "../app/store"; -import { TourProvider } from "../features/Tour"; import { AbortControllerProvider } from "../contexts/AbortControllers"; +import { v4 as uuidv4 } from "uuid"; +import type { ChatThreadRuntime } from "../features/Chat/Thread/types"; + +// Helper to create a default thread runtime for tests +const createTestThreadRuntime = (): ChatThreadRuntime => { + return { + thread: { + id: uuidv4(), + messages: [], + title: "", + model: "", + last_user_message_id: "", + tool_use: "explore", + new_chat_suggested: { wasSuggested: false }, + boost_reasoning: false, + + increase_max_tokens: false, + include_project_info: true, + context_tokens_cap: undefined, + }, + streaming: false, + waiting_for_response: false, + prevent_send: false, + error: null, + queued_items: [], + send_immediately: false, + attached_images: [], + attached_text_files: [], + confirmation: { + pause: false, + pause_reasons: [], + status: { + wasInteracted: false, + confirmationStatus: true, + }, + }, + snapshot_received: true, + task_widget_expanded: false, + }; +}; + +// Helper to create default chat state with a thread +export const createDefaultChatState = () => { + const runtime = createTestThreadRuntime(); + return { + current_thread_id: runtime.thread.id, + open_thread_ids: [runtime.thread.id], + threads: { [runtime.thread.id]: runtime }, + system_prompt: {}, + tool_use: "explore" as const, + sse_refresh_requested: null, + stream_version: 0, + }; +}; // This type interface extends the default options for render from RTL, as well // as allows the user to specify other things such as initialState, store. 
@@ -26,8 +79,8 @@ const customRender = ( preloadedState, // Automatically create a store instance if no store was passed in store = setUpStore({ - // @ts-expect-error finished - tour: { type: "finished", step: 0 }, + // Provide default chat state with a thread for tests + chat: createDefaultChatState(), ...preloadedState, }), ...renderOptions @@ -36,9 +89,7 @@ const customRender = ( const Wrapper = ({ children }: PropsWithChildren) => ( <Provider store={store}> <Theme> - <TourProvider> - <AbortControllerProvider>{children}</AbortControllerProvider> - </TourProvider> + <AbortControllerProvider>{children}</AbortControllerProvider> </Theme> </Provider> ); diff --git a/refact-agent/gui/src/utils/threadStorage.ts b/refact-agent/gui/src/utils/threadStorage.ts new file mode 100644 index 000000000..268390c3e --- /dev/null +++ b/refact-agent/gui/src/utils/threadStorage.ts @@ -0,0 +1,181 @@ +import { LspChatMode, ReasoningEffort } from "../features/Chat/Thread/types"; +import { SystemPrompts } from "../services/refact/prompts"; + +const MODE_PARAMS_KEY_PREFIX = "refact_mode_params_"; +const DRAFT_MESSAGES_KEY = "refact_draft_messages"; +const MAX_DRAFT_MESSAGES = 50; + +export interface PersistedModeParams { + model?: string; + boost_reasoning?: boolean; + reasoning_effort?: ReasoningEffort; + thinking_budget?: number; + temperature?: number; + max_tokens?: number; + increase_max_tokens?: boolean; + include_project_info?: boolean; + context_tokens_cap?: number; + system_prompt?: SystemPrompts; + checkpoints_enabled?: boolean; + follow_ups_enabled?: boolean; +} + +export interface PersistedThreadParams extends PersistedModeParams { + mode?: LspChatMode; +} + +type DraftMessagesStorage = Partial< + Record< + string, + { + content: string; + timestamp: number; + } + > +>; + +function getModeKey(mode: LspChatMode): string { + return `${MODE_PARAMS_KEY_PREFIX}${mode}`; +} + +export function saveModeParams( + mode: LspChatMode, + params: Partial<PersistedModeParams>, +): void { + try { + if (typeof localStorage === "undefined") return; + const existing = getModeParams(mode); + const merged = { ...existing, ...params }; + localStorage.setItem(getModeKey(mode), JSON.stringify(merged)); + } catch { + // Silent fail + } +} + +export function getModeParams(mode: LspChatMode): Partial<PersistedModeParams> { + try { + if (typeof localStorage === "undefined") return {}; + const stored = localStorage.getItem(getModeKey(mode)); + if (!stored) return {}; + return JSON.parse(stored) as Partial<PersistedModeParams>; + } catch { + return {}; + } +} + +export function getLastThreadParams( + mode?: LspChatMode, +): Partial<PersistedThreadParams> { + const defaultMode = mode ?? "agent"; + const modeParams = getModeParams(defaultMode); + return { ...modeParams, mode: defaultMode }; +} + +export function saveLastThreadParams( + params: Partial<PersistedThreadParams>, +): void { + const mode = params.mode ?? 
"agent"; + const { mode: _, ...modeParams } = params; + saveModeParams(mode, modeParams); +} + +function loadDraftMessagesStorage(): DraftMessagesStorage { + try { + if (typeof localStorage === "undefined") return {}; + const stored = localStorage.getItem(DRAFT_MESSAGES_KEY); + if (!stored) return {}; + return JSON.parse(stored) as DraftMessagesStorage; + } catch { + return {}; + } +} + +function saveDraftMessagesStorage(storage: DraftMessagesStorage): void { + try { + if (typeof localStorage === "undefined") return; + const entries = Object.entries(storage).filter( + (entry): entry is [string, { content: string; timestamp: number }] => + entry[1] !== undefined, + ); + if (entries.length > MAX_DRAFT_MESSAGES) { + const sorted = entries.sort((a, b) => b[1].timestamp - a[1].timestamp); + const pruned = Object.fromEntries(sorted.slice(0, MAX_DRAFT_MESSAGES)); + localStorage.setItem(DRAFT_MESSAGES_KEY, JSON.stringify(pruned)); + } else { + localStorage.setItem(DRAFT_MESSAGES_KEY, JSON.stringify(storage)); + } + } catch { + // Silent fail + } +} + +export function saveDraftMessage(threadId: string, content: string): void { + try { + if (!threadId) return; + const storage = loadDraftMessagesStorage(); + if (!content.trim()) { + const { [threadId]: _, ...rest } = storage; + saveDraftMessagesStorage(rest); + } else { + storage[threadId] = { content, timestamp: Date.now() }; + saveDraftMessagesStorage(storage); + } + } catch { + // Silent fail + } +} + +export function getDraftMessage(threadId: string): string { + try { + if (!threadId) return ""; + const storage = loadDraftMessagesStorage(); + return storage[threadId]?.content ?? ""; + } catch { + return ""; + } +} + +export function clearDraftMessage(threadId: string): void { + try { + if (!threadId) return; + const storage = loadDraftMessagesStorage(); + const { [threadId]: _, ...rest } = storage; + saveDraftMessagesStorage(rest); + } catch { + // Silent fail + } +} + +export function clearAllDraftMessages(): void { + try { + if (typeof localStorage === "undefined") return; + localStorage.removeItem(DRAFT_MESSAGES_KEY); + } catch { + // Silent fail + } +} + +export function pruneStaleDraftMessages(): void { + try { + const storage = loadDraftMessagesStorage(); + const sevenDaysAgo = Date.now() - 7 * 24 * 60 * 60 * 1000; + const pruned: DraftMessagesStorage = {}; + let didPrune = false; + for (const [threadId, draft] of Object.entries(storage)) { + if (!draft) { + didPrune = true; + continue; + } + if (draft.timestamp > sevenDaysAgo) { + pruned[threadId] = draft; + } else { + didPrune = true; + } + } + if (didPrune) { + saveDraftMessagesStorage(pruned); + } + } catch { + // Silent fail + } +} diff --git a/refact-agent/gui/vite.config.ts b/refact-agent/gui/vite.config.ts index 07e7281c2..f07a1972b 100644 --- a/refact-agent/gui/vite.config.ts +++ b/refact-agent/gui/vite.config.ts @@ -9,7 +9,27 @@ import dts from "vite-plugin-dts"; import { execSync } from "child_process"; -const commitHash = execSync("git rev-parse --short HEAD").toString().trim(); +function resolveCommitHash(): string { + const envSha = + process.env.GITHUB_SHA ?? + process.env.CI_COMMIT_SHA ?? + process.env.VERCEL_GIT_COMMIT_SHA ?? 
+ process.env.BUILD_VCS_NUMBER; + + if (envSha && envSha.length >= 7) return envSha.slice(0, 7); + + try { + return execSync("git rev-parse --short HEAD", { + stdio: ["ignore", "pipe", "ignore"], + }) + .toString() + .trim(); + } catch { + return "unknown"; + } +} + +const commitHash = resolveCommitHash(); // TODO: remove extra compile step when vscode can run esmodules https://github.com/microsoft/vscode/issues/130367 diff --git a/test_tempfile.rs b/test_tempfile.rs new file mode 100644 index 000000000..c66ef48d6 --- /dev/null +++ b/test_tempfile.rs @@ -0,0 +1,27 @@ +use std::io::Write; +use std::path::PathBuf; +use tempfile::NamedTempFile; + +fn main() { + let dir = PathBuf::from("/tmp"); + let target = dir.join("test_target.txt"); + + let content = "Test content"; + + let mut tmp_file = NamedTempFile::new_in(&dir).expect("Failed to create temp file"); + tmp_file.write_all(content.as_bytes()).expect("Failed to write"); + tmp_file.flush().expect("Failed to flush"); + + println!("Temp file created: {:?}", tmp_file.path()); + + tmp_file.persist(&target).expect("Failed to persist"); + + println!("File persisted to: {:?}", target); + + let read_content = std::fs::read_to_string(&target).expect("Failed to read"); + assert_eq!(read_content, content); + + std::fs::remove_file(&target).ok(); + + println!("Test passed!"); +}
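
A minimal sketch of how the new atCommands utilities introduced above compose: `parseLines` tokenizes user text outside code fences, `getAtCommandTokens` collects the `@`-command tokens, and `tokenToChipInfo` maps each token to chip display data. This is illustrative only; the relative import path is an assumption (resolve it against `refact-agent/gui/src`), and the second argument to `tokenToChipInfo` stands in for a host-side "commands disabled" flag.

```ts
// Illustrative sketch, not part of the patch. Import path assumed relative
// to refact-agent/gui/src; adjust for the importing file's location.
import {
  parseLines,
  getAtCommandTokens,
  hasAtCommands,
  tokenToChipInfo,
} from "./utils/atCommands";

const input = "check @file src/main.rs:10-20 @web https://docs.rs";

// Tokenize the input; lines inside ``` fences are left as plain text.
const parsed = parseLines(input);

if (hasAtCommands(parsed)) {
  const tokens = getAtCommandTokens(parsed);

  // hostDisabled = false keeps chips enabled; passing the full token list
  // lets duplicate filenames be disambiguated with their parent directory.
  const chips = tokens.map((token) => tokenToChipInfo(token, false, tokens));

  for (const chip of chips) {
    // Expected output with the input above:
    //   "📎 main.rs:10-20" (lineRange ":10-20")
    //   "🌐 docs.rs"
    console.log(`${chip.icon} ${chip.label}`, chip.lineRange ?? "");
  }
}
```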