Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/logsqueak/tui/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,7 @@ def transition_to_phase2(self, selected_blocks: List[BlockState]) -> None:
graph_paths=graph_paths,
llm_client=self.llm_client,
rag_search=self.rag_search,
config=self.config,
auto_start_workers=True,
name="phase2",
)
Expand Down
11 changes: 9 additions & 2 deletions src/logsqueak/tui/screens/content_editing.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
Users can see LLM-suggested rewordings, manually edit content, and review RAG search results.
"""

from typing import Optional, Dict
from typing import Optional, Dict, Any
import asyncio
from textual.app import ComposeResult
from textual.screen import Screen
Expand Down Expand Up @@ -122,6 +122,7 @@ def __init__(
graph_paths: GraphPaths,
llm_client: Optional[LLMClient] = None,
rag_search: Optional[RAGSearch] = None,
config: Optional[Any] = None,
auto_start_workers: bool = True,
**kwargs
):
Expand All @@ -134,6 +135,7 @@ def __init__(
graph_paths: GraphPaths instance for loading page contents
llm_client: LLM client instance (None for testing)
rag_search: RAG search service instance (None for testing)
config: Application config (for RAG top_k, etc.)
auto_start_workers: Whether to auto-start background workers (default True)
"""
super().__init__(**kwargs)
Expand All @@ -143,6 +145,7 @@ def __init__(
self.graph_paths = graph_paths
self.llm_client = llm_client
self.rag_search = rag_search
self.config = config
self.auto_start_workers = auto_start_workers

# Map block_id to EditedContent for quick lookup
Expand Down Expand Up @@ -894,11 +897,15 @@ async def _rag_search_worker(self) -> None:
}

# Find candidate chunks (returns hierarchical chunks + frontmatter from ChromaDB)
top_k = 10 # Default fallback for tests
if self.config and hasattr(self.config, 'rag') and hasattr(self.config.rag, 'top_k'):
top_k = self.config.rag.top_k

rag_results = await self.rag_search.find_candidates(
edited_content=self.edited_content,
original_contexts=original_contexts,
graph_paths=self.graph_paths,
top_k=10 # TODO: Get from config
top_k=top_k
)

# Unpack results: chunks per block and page frontmatter
Expand Down
37 changes: 37 additions & 0 deletions tests/ui/test_phase2_rag_blocking.py
Original file line number Diff line number Diff line change
Expand Up @@ -327,3 +327,40 @@ async def test_rag_search_error_shows_message(sample_blocks, sample_edited_conte
# Error message should be displayed
assert screen.rag_search_state == BackgroundTaskState.FAILED
assert screen.rag_search_error is not None


def test_rag_config_top_k_extraction():
    """Verify a custom ``rag.top_k`` value survives the extraction logic.

    Mirrors the fallback pattern used in ``_rag_search_worker``: start from
    the default of 10, then override when the config exposes ``rag.top_k``.
    The logic is duplicated here (rather than invoking the worker) so the
    test needs no TUI screen or async plumbing.
    """
    from logsqueak.models.config import Config, LogseqConfig, LLMConfig, RAGConfig
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        # Build a config whose RAG section carries a non-default top_k.
        config = Config(
            logseq=LogseqConfig(graph_path=tmpdir),
            llm=LLMConfig(endpoint="http://fake", model="fake", api_key="fake"),
            rag=RAGConfig(top_k=25),
        )

        # Sanity-check the fixture itself before testing the extraction.
        assert config.rag.top_k == 25

        # Simulate the extraction logic used in _rag_search_worker.
        top_k = 10  # Default fallback
        if config and hasattr(config, 'rag') and hasattr(config.rag, 'top_k'):
            top_k = config.rag.top_k

        assert top_k == 25


def test_rag_default_top_k_without_config():
    """A missing config object leaves top_k at its default of 10."""
    absent_config = None

    # Same guard chain as _rag_search_worker: a falsy config short-circuits
    # the hasattr checks, so the default is never overridden.
    effective_top_k = 10  # Default fallback
    if absent_config and hasattr(absent_config, 'rag') and hasattr(absent_config.rag, 'top_k'):
        effective_top_k = absent_config.rag.top_k

    assert effective_top_k == 10