diff --git a/README.md b/README.md
index 34027891..a93dcd55 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Chronicle
+# Chronicle (forked from https://github.com/chronicler-ai/chronicle)
 
 Self-hostable AI system that captures audio/video data from OMI devices and other sources to generate memories, action items, and contextual insights about your conversations and daily interactions.
 
diff --git a/backends/advanced/src/advanced_omi_backend/app_factory.py b/backends/advanced/src/advanced_omi_backend/app_factory.py
index 7ccda184..fdde55de 100644
--- a/backends/advanced/src/advanced_omi_backend/app_factory.py
+++ b/backends/advanced/src/advanced_omi_backend/app_factory.py
@@ -37,6 +37,7 @@
 from advanced_omi_backend.routers.modules.websocket_routes import router as websocket_router
 from advanced_omi_backend.services.audio_service import get_audio_stream_service
 from advanced_omi_backend.task_manager import init_task_manager, get_task_manager
+from advanced_omi_backend.services.mcp_server import setup_mcp_server
 
 logger = logging.getLogger(__name__)
 application_logger = logging.getLogger("audio_processing")
@@ -66,6 +67,16 @@ async def lifespan(app: FastAPI):
         application_logger.error(f"Failed to initialize Beanie: {e}")
         raise
 
+    # Initialize settings manager
+    try:
+        from advanced_omi_backend.settings_manager import init_settings_manager
+        settings_mgr = init_settings_manager(config.db)
+        await settings_mgr.initialize()
+        application_logger.info("✅ Settings manager initialized and loaded from environment/database")
+    except Exception as e:
+        application_logger.error(f"Failed to initialize settings manager: {e}")
+        # Don't raise - use fallback to environment variables if settings manager fails
+
     # Create admin user if needed
     try:
         await create_admin_user_if_needed()
@@ -205,6 +216,10 @@ def create_app() -> FastAPI:
         tags=["users"],
     )
 
+    # Setup MCP server for conversation access
+    setup_mcp_server(app)
+    logger.info("MCP server configured for conversation access")
+
     # 
Mount static files LAST (mounts are catch-all patterns)
     CHUNK_DIR = Path("/app/audio_chunks")
     app.mount("/audio", StaticFiles(directory=CHUNK_DIR), name="audio")
diff --git a/backends/advanced/src/advanced_omi_backend/models/user.py b/backends/advanced/src/advanced_omi_backend/models/user.py
index b0ced195..7998c5b3 100644
--- a/backends/advanced/src/advanced_omi_backend/models/user.py
+++ b/backends/advanced/src/advanced_omi_backend/models/user.py
@@ -25,6 +25,8 @@ class UserRead(BaseUser[PydanticObjectId]):
     display_name: Optional[str] = None
     registered_clients: dict[str, dict] = Field(default_factory=dict)
     primary_speakers: list[dict] = Field(default_factory=list)
+    api_key: Optional[str] = None  # NOTE(review): raw key is returned in every user-read response — confirm this exposure is intended (consider returning it only at creation time)
+    api_key_created_at: Optional[datetime] = None
 
 
 class UserUpdate(BaseUserUpdate):
@@ -62,6 +64,9 @@
     registered_clients: dict[str, dict] = Field(default_factory=dict)
     # Speaker processing filter configuration
     primary_speakers: list[dict] = Field(default_factory=list)
+    # API key for MCP access
+    api_key: Optional[str] = None
+    api_key_created_at: Optional[datetime] = None
 
     class Settings:
         name = "users"  # Collection name in MongoDB - standardized from "fastapi_users"
diff --git a/backends/advanced/src/advanced_omi_backend/routers/api_router.py b/backends/advanced/src/advanced_omi_backend/routers/api_router.py
index 528713c0..e6abfe48 100644
--- a/backends/advanced/src/advanced_omi_backend/routers/api_router.py
+++ b/backends/advanced/src/advanced_omi_backend/routers/api_router.py
@@ -16,6 +16,7 @@
     conversation_router,
     memory_router,
     queue_router,
+    settings_router,
     system_router,
     user_router,
 )
@@ -34,6 +35,7 @@
 router.include_router(client_router)
 router.include_router(conversation_router)
 router.include_router(memory_router)
+router.include_router(settings_router)
 router.include_router(system_router)
 router.include_router(queue_router)
 router.include_router(health_router)  # Also include under /api for frontend compatibility
diff --git
a/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py b/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py index a5669b06..2cda0884 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/__init__.py @@ -12,6 +12,7 @@ - audio_routes: Audio file uploads and processing - health_routes: Health check endpoints - websocket_routes: WebSocket connection handling +- settings_routes: Application settings management """ from .audio_routes import router as audio_router @@ -21,6 +22,7 @@ from .health_routes import router as health_router from .memory_routes import router as memory_router from .queue_routes import router as queue_router +from .settings_routes import router as settings_router from .system_routes import router as system_router from .user_routes import router as user_router from .websocket_routes import router as websocket_router @@ -33,6 +35,7 @@ "health_router", "memory_router", "queue_router", + "settings_router", "system_router", "user_router", "websocket_router", diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py new file mode 100644 index 00000000..3da73c4d --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/settings_routes.py @@ -0,0 +1,564 @@ +""" +Application settings management routes. + +Provides endpoints for reading and updating dynamic application settings. +Settings changes take effect within the cache TTL (default: 5 seconds). 
+""" + +import logging + +from fastapi import APIRouter, Depends, HTTPException + +from advanced_omi_backend.auth import current_active_user, current_superuser +from advanced_omi_backend.settings_manager import get_settings_manager, SettingsManager +from advanced_omi_backend.settings_models import ( + AllSettings, + ApiKeysSettings, + AudioProcessingSettings, + ConversationSettings, + DiarizationSettings, + InfrastructureSettings, + LLMSettings, + MiscSettings, + NetworkSettings, + ProviderSettings, + SpeechDetectionSettings, +) +from advanced_omi_backend.users import User + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/settings", tags=["settings"]) + + +# All Settings (Combined) + + +@router.get("", response_model=AllSettings) +async def get_all_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Get all application settings. + + Available to all authenticated users for read access. + """ + return await settings_mgr.get_all_settings() + + +@router.put("", response_model=AllSettings) +async def update_all_settings( + settings: AllSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update all application settings at once. + + Admin only. Changes take effect within the cache TTL. 
+ """ + await settings_mgr.update_all_settings(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_all_settings() + + +# Speech Detection Settings + + +@router.get("/speech-detection", response_model=SpeechDetectionSettings) +async def get_speech_detection_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get speech detection settings.""" + return await settings_mgr.get_speech_detection() + + +@router.put("/speech-detection", response_model=SpeechDetectionSettings) +async def update_speech_detection_settings( + settings: SpeechDetectionSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update speech detection settings. Admin only. + + These settings control when audio sessions are converted to conversations. + """ + await settings_mgr.update_speech_detection(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_speech_detection() + + +# Conversation Settings + + +@router.get("/conversation", response_model=ConversationSettings) +async def get_conversation_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get conversation management settings.""" + return await settings_mgr.get_conversation() + + +@router.put("/conversation", response_model=ConversationSettings) +async def update_conversation_settings( + settings: ConversationSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update conversation management settings. Admin only. + + Controls conversation timeouts, transcription buffering, and speaker enrollment. 
+ """ + await settings_mgr.update_conversation(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_conversation() + + +# Audio Processing Settings + + +@router.get("/audio-processing", response_model=AudioProcessingSettings) +async def get_audio_processing_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get audio processing settings.""" + return await settings_mgr.get_audio_processing() + + +@router.put("/audio-processing", response_model=AudioProcessingSettings) +async def update_audio_processing_settings( + settings: AudioProcessingSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update audio processing settings. Admin only. + + Controls audio cropping, silence removal, and segment duration. + """ + await settings_mgr.update_audio_processing(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_audio_processing() + + +# Diarization Settings + + +@router.get("/diarization", response_model=DiarizationSettings) +async def get_diarization_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get speaker diarization settings.""" + return await settings_mgr.get_diarization() + + +@router.put("/diarization", response_model=DiarizationSettings) +async def update_diarization_settings( + settings: DiarizationSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update speaker diarization settings. Admin only. + + Controls how speakers are identified and segments are separated. 
+ """ + await settings_mgr.update_diarization(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_diarization() + + +# LLM Settings + + +@router.get("/llm", response_model=LLMSettings) +async def get_llm_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get LLM provider and model settings.""" + return await settings_mgr.get_llm() + + +@router.put("/llm", response_model=LLMSettings) +async def update_llm_settings( + settings: LLMSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update LLM settings. Admin only. + + Controls which LLM provider and models to use for processing and chat. + """ + await settings_mgr.update_llm(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_llm() + + +# Provider Settings + + +@router.get("/providers", response_model=ProviderSettings) +async def get_provider_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get service provider settings.""" + return await settings_mgr.get_providers() + + +@router.put("/providers", response_model=ProviderSettings) +async def update_provider_settings( + settings: ProviderSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update service provider settings. Admin only. + + Controls which memory and transcription providers to use. 
+ """ + await settings_mgr.update_providers(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_providers() + + +# Network Settings + + +@router.get("/network", response_model=NetworkSettings) +async def get_network_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get network and public access settings.""" + return await settings_mgr.get_network() + + +@router.put("/network", response_model=NetworkSettings) +async def update_network_settings( + settings: NetworkSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update network settings. Admin only. + + Controls public endpoints, CORS, and network access configuration. + """ + await settings_mgr.update_network(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_network() + + +# Infrastructure Settings + + +@router.get("/infrastructure", response_model=InfrastructureSettings) +async def get_infrastructure_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get infrastructure settings.""" + return await settings_mgr.get_infrastructure() + + +@router.put("/infrastructure", response_model=InfrastructureSettings) +async def update_infrastructure_settings( + settings: InfrastructureSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update infrastructure settings. Admin only. + + Controls MongoDB, Redis, Qdrant, and Neo4j connection settings. 
+ """ + await settings_mgr.update_infrastructure(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_infrastructure() + + +# Miscellaneous Settings + + +@router.get("/misc", response_model=MiscSettings) +async def get_misc_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get miscellaneous settings.""" + return await settings_mgr.get_misc() + + +@router.put("/misc", response_model=MiscSettings) +async def update_misc_settings( + settings: MiscSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update miscellaneous settings. Admin only. + + Controls debug options and telemetry. + """ + await settings_mgr.update_misc(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_misc() + + +# API Keys Settings + + +@router.get("/api-keys", response_model=ApiKeysSettings) +async def get_api_keys_settings( + current_user: User = Depends(current_active_user), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """Get API keys settings.""" + return await settings_mgr.get_api_keys() + + +@router.put("/api-keys", response_model=ApiKeysSettings) +async def update_api_keys_settings( + settings: ApiKeysSettings, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Update API keys settings. Admin only. + + Controls external service API keys. + """ + await settings_mgr.update_api_keys(settings, updated_by=str(current_user.id)) + return await settings_mgr.get_api_keys() + + +@router.get("/api-keys/load-from-file", response_model=ApiKeysSettings) +async def load_api_keys_from_file( + file_path: str = ".env.api-keys", + current_user: User = Depends(current_superuser), +): + """ + Load API keys from a file. Admin only. 
+ + Args: + file_path: Path to the API keys file (default: .env.api-keys) + + Returns: + API keys loaded from the file + """ + from advanced_omi_backend.utils.api_keys_manager import read_api_keys_from_file + + try: + keys_dict = read_api_keys_from_file(file_path) + return ApiKeysSettings(**keys_dict) + except Exception as e: + logger.error(f"Error loading API keys from file {file_path}: {e}") + raise HTTPException( + status_code=500, + detail=f"Failed to load API keys from {file_path}: {str(e)}" + ) + + +@router.post("/api-keys/save") +async def save_api_keys( + settings: ApiKeysSettings, + save_to_file: bool = True, + save_to_database: bool = True, + current_user: User = Depends(current_superuser), + settings_mgr: SettingsManager = Depends(get_settings_manager), +): + """ + Save API keys to file and/or database. Admin only. + + Args: + settings: API keys to save + save_to_file: Save to .env.api-keys file (default: True) + save_to_database: Save to MongoDB (default: True) + """ + from advanced_omi_backend.utils.api_keys_manager import write_api_keys_to_file + + results = {"file": False, "database": False, "errors": []} + + # Save to file + if save_to_file: + try: + keys_dict = { + "openai_api_key": settings.openai_api_key, + "deepgram_api_key": settings.deepgram_api_key, + "mistral_api_key": settings.mistral_api_key, + "hf_token": settings.hf_token, + "langfuse_public_key": settings.langfuse_public_key, + "langfuse_secret_key": settings.langfuse_secret_key, + "ngrok_authtoken": settings.ngrok_authtoken, + } + success = write_api_keys_to_file(keys_dict, ".env.api-keys") + results["file"] = success + if not success: + results["errors"].append("Failed to write to .env.api-keys file") + except Exception as e: + logger.error(f"Error writing API keys to file: {e}") + results["errors"].append(f"File write error: {str(e)}") + + # Save to database + if save_to_database: + try: + await settings_mgr.update_api_keys(settings, updated_by=str(current_user.id)) + 
results["database"] = True
+        except Exception as e:
+            logger.error(f"Error saving API keys to database: {e}")
+            results["errors"].append(f"Database save error: {str(e)}")
+
+    return {
+        "success": results["file"] or results["database"],
+        "saved_to": {
+            "file": results["file"],
+            "database": results["database"],
+        },
+        "errors": results["errors"],
+        "settings": await settings_mgr.get_api_keys(),
+    }
+
+
+# Cache Management
+
+
+@router.post("/cache/invalidate")
+async def invalidate_settings_cache(
+    category: str | None = None,
+    current_user: User = Depends(current_superuser),
+    settings_mgr: SettingsManager = Depends(get_settings_manager),
+):
+    """
+    Invalidate settings cache. Admin only.
+
+    Forces settings to reload from database on next access.
+    If category is provided, only invalidates that category.
+    """
+    settings_mgr.invalidate_cache(category)
+    return {
+        "status": "success",
+        "message": f"Cache invalidated for {category if category else 'all settings'}",
+    }
+
+
+# Infrastructure Status
+
+
+@router.get("/infrastructure/status")
+async def get_infrastructure_status(
+    current_user: User = Depends(current_active_user),
+    settings_mgr: SettingsManager = Depends(get_settings_manager),
+):
+    """
+    Get infrastructure service connection status.
+
+    Returns URLs and connection status for MongoDB, Redis, Qdrant, Neo4j.
+    Uses editable settings from database.
+ """ + from advanced_omi_backend.app_config import get_app_config + + # Get infrastructure settings from database + infra_settings = await settings_mgr.get_infrastructure() + config = get_app_config() + + status = { + "mongodb": { + "url": infra_settings.mongodb_uri, + "database": infra_settings.mongodb_database, + "connected": False, + }, + "redis": { + "url": infra_settings.redis_url, + "connected": False, + }, + "qdrant": { + "url": f"http://{infra_settings.qdrant_base_url}:{infra_settings.qdrant_port}", + "connected": False, + }, + "neo4j": { + "host": infra_settings.neo4j_host, + "user": infra_settings.neo4j_user, + "connected": False, + }, + } + + # Check MongoDB + try: + await config.mongo_client.admin.command('ping') + status["mongodb"]["connected"] = True + except Exception as e: + logger.debug(f"MongoDB connection check failed: {e}") + + # Check Redis + try: + from advanced_omi_backend.controllers.queue_controller import redis_conn + redis_conn.ping() + status["redis"]["connected"] = True + except Exception as e: + logger.debug(f"Redis connection check failed: {e}") + + # Check Qdrant + try: + import httpx + async with httpx.AsyncClient() as client: + response = await client.get(f"{status['qdrant']['url']}/", timeout=2.0) + status["qdrant"]["connected"] = response.status_code == 200 + except Exception as e: + logger.debug(f"Qdrant connection check failed: {e}") + + # Neo4j check (optional service) + # We don't check Neo4j connection as it's optional and may not be configured + + return status + + +@router.get("/api-keys/status") +async def get_api_keys_status( + current_user: User = Depends(current_superuser), +): + """ + Get API keys configuration status. Admin only. + + Returns which API keys are configured (but not the actual keys). 
+ """ + import os + + keys_status = { + "openai": { + "name": "OpenAI API Key", + "configured": bool(os.getenv("OPENAI_API_KEY")), + "env_var": "OPENAI_API_KEY", + }, + "deepgram": { + "name": "Deepgram API Key", + "configured": bool(os.getenv("DEEPGRAM_API_KEY")), + "env_var": "DEEPGRAM_API_KEY", + }, + "mistral": { + "name": "Mistral API Key", + "configured": bool(os.getenv("MISTRAL_API_KEY")), + "env_var": "MISTRAL_API_KEY", + }, + "hf_token": { + "name": "HuggingFace Token", + "configured": bool(os.getenv("HF_TOKEN")), + "env_var": "HF_TOKEN", + }, + "langfuse_public": { + "name": "Langfuse Public Key", + "configured": bool(os.getenv("LANGFUSE_PUBLIC_KEY")), + "env_var": "LANGFUSE_PUBLIC_KEY", + }, + "langfuse_secret": { + "name": "Langfuse Secret Key", + "configured": bool(os.getenv("LANGFUSE_SECRET_KEY")), + "env_var": "LANGFUSE_SECRET_KEY", + }, + "ngrok": { + "name": "Ngrok Auth Token", + "configured": bool(os.getenv("NGROK_AUTHTOKEN")), + "env_var": "NGROK_AUTHTOKEN", + }, + } + + return keys_status diff --git a/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py b/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py index 12ed5c63..233ddd68 100644 --- a/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py +++ b/backends/advanced/src/advanced_omi_backend/routers/modules/user_routes.py @@ -5,10 +5,12 @@ """ import logging +import secrets +from datetime import UTC, datetime -from fastapi import APIRouter, Depends +from fastapi import APIRouter, Depends, HTTPException -from advanced_omi_backend.auth import current_superuser +from advanced_omi_backend.auth import current_active_user, current_superuser from advanced_omi_backend.controllers import user_controller from advanced_omi_backend.users import User, UserCreate, UserUpdate @@ -44,3 +46,42 @@ async def delete_user( ): """Delete a user and optionally their associated data. 
Admin only."""
     return await user_controller.delete_user(user_id, delete_conversations, delete_memories)
+
+
+@router.post("/me/api-key")
+async def generate_api_key(current_user: User = Depends(current_active_user)):
+    """Generate a new API key for the current user."""
+    try:
+        # Generate a secure random API key (token_urlsafe(32) -> ~43-char URL-safe base64 string from 32 random bytes)
+        new_api_key = secrets.token_urlsafe(32)
+
+        # Update user with new API key
+        current_user.api_key = new_api_key
+        current_user.api_key_created_at = datetime.now(UTC)
+        await current_user.save()
+
+        logger.info(f"Generated new API key for user {current_user.id}")
+
+        return {
+            "api_key": new_api_key,
+            "created_at": current_user.api_key_created_at.isoformat()
+        }
+    except Exception as e:
+        logger.error(f"Failed to generate API key for user {current_user.id}: {e}")
+        raise HTTPException(status_code=500, detail="Failed to generate API key")
+
+
+@router.delete("/me/api-key")
+async def revoke_api_key(current_user: User = Depends(current_active_user)):
+    """Revoke the current user's API key."""
+    try:
+        current_user.api_key = None
+        current_user.api_key_created_at = None
+        await current_user.save()
+
+        logger.info(f"Revoked API key for user {current_user.id}")
+
+        return {"status": "success", "message": "API key revoked"}
+    except Exception as e:
+        logger.error(f"Failed to revoke API key for user {current_user.id}: {e}")
+        raise HTTPException(status_code=500, detail="Failed to revoke API key")
diff --git a/backends/advanced/src/advanced_omi_backend/services/mcp_server.py b/backends/advanced/src/advanced_omi_backend/services/mcp_server.py
new file mode 100644
index 00000000..27288599
--- /dev/null
+++ b/backends/advanced/src/advanced_omi_backend/services/mcp_server.py
@@ -0,0 +1,532 @@
+"""
+MCP Server for Friend-Lite conversations.
+
+This module implements an MCP (Model Context Protocol) server that provides
+conversation access tools for LLMs to retrieve conversation data, transcripts,
+and audio files.
+ +Key features: +- List conversations with filtering and pagination +- Get detailed conversation data including transcripts and segments +- Access conversation audio files as resources +- User-scoped access with proper authentication +""" + +import base64 +import contextvars +import json +import logging +from pathlib import Path +from typing import Optional, List + +from fastapi import FastAPI, Request +from fastapi.routing import APIRouter +from mcp.server.fastmcp import FastMCP +from mcp.server.sse import SseServerTransport + +from advanced_omi_backend.config import CHUNK_DIR +from advanced_omi_backend.models.conversation import Conversation +from advanced_omi_backend.models.user import User + +logger = logging.getLogger(__name__) + +# Initialize MCP +mcp = FastMCP("friend-lite-conversations") + +# Context variables for user_id +user_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("user_id") + +# Create a router for MCP endpoints +mcp_router = APIRouter(prefix="/mcp") + +# Initialize SSE transport +sse = SseServerTransport("/mcp/messages/") + + +async def resolve_user_identifier(identifier: str) -> Optional[str]: + """ + Resolve a user identifier (email or user_id) to a user_id. 
+
+    Args:
+        identifier: Either an email address or a MongoDB ObjectId string
+
+    Returns:
+        User ID string if found, None otherwise
+    """
+    try:
+        # First try to find by email (case-insensitive)
+        user = await User.find_one(User.email == identifier.lower())
+        if user:
+            logger.info(f"Resolved email '{identifier}' to user_id: {user.id}")
+            return str(user.id)
+
+        # If not found by email, assume it's already a user_id
+        # Verify it exists
+        from bson import ObjectId
+        try:
+            user = await User.find_one(User.id == ObjectId(identifier))
+            if user:
+                logger.info(f"Verified user_id: {identifier}")
+                return str(user.id)
+        except Exception:
+            # Not a valid ObjectId (or lookup failed) — fall through to "not found"
+            pass
+
+        logger.warning(f"Could not resolve user identifier: {identifier}")
+        return None
+    except Exception as e:
+        logger.error(f"Error resolving user identifier '{identifier}': {e}")
+        return None
+
+
+@mcp.tool(description="List all conversations. Returns conversation_id, title, summary, created_at, client_id, segment_count, memory_count, and has_audio. Supports date filtering and pagination.")
+async def list_conversations(
+    limit: int = 20,
+    offset: int = 0,
+    order_by: str = "created_at_desc",
+    start_date: Optional[str] = None,
+    end_date: Optional[str] = None
+) -> str:
+    """
+    List conversations with optional date filtering.
+ + Args: + limit: Maximum number of conversations to return (default: 20, max: 100) + offset: Number of conversations to skip for pagination (default: 0) + order_by: Sort order - "created_at_desc" (newest first) or "created_at_asc" (oldest first) + start_date: Optional ISO 8601 date string (e.g., "2025-01-01T00:00:00Z") - filter conversations after this date + end_date: Optional ISO 8601 date string (e.g., "2025-12-31T23:59:59Z") - filter conversations before this date + + Returns: + JSON string with list of conversations and pagination info + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Validate and limit parameters + limit = min(max(1, limit), 100) # Clamp between 1 and 100 + offset = max(0, offset) + + # Build base query + # If uid is "all", return all conversations (temporary for development) + # In the future, this will filter by speaker identity + if uid == "all": + query = Conversation.find_all() + else: + query = Conversation.find(Conversation.user_id == uid) + + # Apply date filtering if provided + from datetime import datetime + + if start_date: + try: + start_dt = datetime.fromisoformat(start_date.replace('Z', '+00:00')) + query = query.find(Conversation.start_datetime >= start_dt) + except ValueError as e: + logger.warning(f"Invalid start_date format: {start_date}, error: {e}") + return json.dumps({"error": f"Invalid start_date format: {start_date}. Use ISO 8601 format."}, indent=2) + + if end_date: + try: + end_dt = datetime.fromisoformat(end_date.replace('Z', '+00:00')) + query = query.find(Conversation.start_datetime <= end_dt) + except ValueError as e: + logger.warning(f"Invalid end_date format: {end_date}, error: {e}") + return json.dumps({"error": f"Invalid end_date format: {end_date}. 
Use ISO 8601 format."}, indent=2) + + # Get total count with same filters + total_count = await query.count() + + # Apply sorting + if order_by == "created_at_asc": + query = query.sort(Conversation.start_datetime) + else: # Default to newest first + query = query.sort(-Conversation.start_datetime) + + # Apply pagination + conversations = await query.skip(offset).limit(limit).to_list() + + # Format conversations for response + formatted_convs = [] + for conv in conversations: + + formatted_convs.append({ + "conversation_id": conv.conversation_id, + "title": conv.title, + "summary": conv.summary, + "start_datetime": conv.start_datetime.isoformat(), + "end_datetime": conv.end_datetime.isoformat() if conv.end_datetime else None, + "segment_count": len(conv.segments), + "memory_count": conv.memory_count, + "client_id": conv.client_id, + }) + + + result = { + "conversations": formatted_convs, + "pagination": { + "total": total_count, + "limit": limit, + "offset": offset, + "returned": len(formatted_convs), + "has_more": (offset + len(formatted_convs)) < total_count + } + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error listing conversations: {e}") + return json.dumps({"error": f"Failed to list conversations: {str(e)}"}, indent=2) + + +@mcp.tool(description="Get detailed information about a specific conversation including full transcript, speaker segments, memories, and version history. Use the conversation_id from list_conversations.") +async def get_conversation(conversation_id: str) -> str: + """ + Get detailed conversation data. 
+ + Args: + conversation_id: The unique conversation identifier + + Returns: + JSON string with complete conversation details + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Format conversation data with explicit fields + conv_data = { + # Core identifiers + "conversation_id": conversation.conversation_id, + "audio_uuid": conversation.audio_uuid, + "user_id": conversation.user_id, + "client_id": conversation.client_id, + + # Metadata + "start_datetime": conversation.start_datetime.isoformat(), + "end_datetime": conversation.end_datetime.isoformat() if conversation.end_datetime else None, + "title": conversation.title, + "summary": conversation.summary, + # "detailed_summary": conversation.detailed_summary, + + # Transcript data + "transcript": conversation.transcript, + + # Memory data + "memory_count": conversation.memory_count, + + # Audio paths + "has_audio": bool(conversation.audio_path), + "has_cropped_audio": bool(conversation.cropped_audio_path), + + # Version information + "active_transcript_version": conversation.active_transcript_version, + "active_memory_version": conversation.active_memory_version, + "transcript_versions_count": len(conversation.transcript_versions), + "memory_versions_count": len(conversation.memory_versions) + } + + return json.dumps(conv_data, indent=2) + + except Exception as e: + logger.exception(f"Error getting conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get conversation: 
{str(e)}"}, indent=2) + + +@mcp.tool(description="Get speaker segments from a conversation. Returns detailed timing and speaker information for each segment of the transcript.") +async def get_segments_from_conversation(conversation_id: str) -> str: + """ + Get speaker segments from a conversation. + + Args: + conversation_id: The unique conversation identifier + + Returns: + JSON string with speaker segments including timing and text + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Format segments + segments_data = { + "conversation_id": conversation_id, + "segment_count": len(conversation.segments), + "segments": [ + { + "start": seg.start, + "end": seg.end, + "duration": seg.end - seg.start, + "text": seg.text, + "speaker": seg.speaker, + "confidence": seg.confidence + } for seg in conversation.segments + ] + } + + return json.dumps(segments_data, indent=2) + + except Exception as e: + logger.exception(f"Error getting segments for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get segments: {str(e)}"}, indent=2) + + +@mcp.resource(uri="conversation://{conversation_id}/audio", name="Conversation Audio", description="Get the audio file for a conversation") +async def get_conversation_audio(conversation_id: str) -> str: + """ + Get audio file for a conversation. 
+ + Args: + conversation_id: The unique conversation identifier + + Returns: + Base64-encoded audio data with metadata + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Default to regular audio (not cropped) + audio_type = "audio" + + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Get the appropriate audio path + if audio_type == "cropped_audio": + audio_path = conversation.cropped_audio_path + if not audio_path: + return json.dumps({"error": "No cropped audio available for this conversation"}, indent=2) + else: # Default to regular audio + audio_path = conversation.audio_path + if not audio_path: + return json.dumps({"error": "No audio file available for this conversation"}, indent=2) + + # Resolve full path + full_path = CHUNK_DIR / audio_path + + if not full_path.exists(): + return json.dumps({"error": f"Audio file not found at path: {audio_path}"}, indent=2) + + # Read and encode audio file + with open(full_path, "rb") as f: + audio_data = f.read() + + audio_base64 = base64.b64encode(audio_data).decode('utf-8') + + result = { + "conversation_id": conversation_id, + "audio_type": audio_type, + "file_path": str(audio_path), + "file_size_bytes": len(audio_data), + "mime_type": "audio/wav", # Friend-Lite stores audio as WAV + "audio_base64": audio_base64 + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error getting audio for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get audio: {str(e)}"}, indent=2) + + 
+@mcp.resource(uri="conversation://{conversation_id}/cropped_audio", name="Conversation Cropped Audio", description="Get the cropped (speech-only) audio file for a conversation") +async def get_conversation_cropped_audio(conversation_id: str) -> str: + """ + Get cropped audio file for a conversation. + + Args: + conversation_id: The unique conversation identifier + + Returns: + Base64-encoded cropped audio data with metadata + """ + uid = user_id_var.get(None) + if not uid: + return json.dumps({"error": "user_id not provided"}, indent=2) + + try: + # Find the conversation + conversation = await Conversation.find_one( + Conversation.conversation_id == conversation_id + ) + + if not conversation: + return json.dumps({"error": f"Conversation '{conversation_id}' not found"}, indent=2) + + # Verify ownership (skip if uid is "all" for development) + if uid != "all" and conversation.user_id != uid: + return json.dumps({"error": "Access forbidden - conversation belongs to another user"}, indent=2) + + # Get cropped audio path + audio_path = conversation.cropped_audio_path + if not audio_path: + return json.dumps({"error": "No cropped audio available for this conversation"}, indent=2) + + # Resolve full path + full_path = CHUNK_DIR / audio_path + + if not full_path.exists(): + return json.dumps({"error": f"Audio file not found at path: {audio_path}"}, indent=2) + + # Read and encode audio file + with open(full_path, "rb") as f: + audio_data = f.read() + + audio_base64 = base64.b64encode(audio_data).decode('utf-8') + + result = { + "conversation_id": conversation_id, + "audio_type": "cropped_audio", + "file_path": str(audio_path), + "file_size_bytes": len(audio_data), + "mime_type": "audio/wav", + "audio_base64": audio_base64 + } + + return json.dumps(result, indent=2) + + except Exception as e: + logger.exception(f"Error getting cropped audio for conversation {conversation_id}: {e}") + return json.dumps({"error": f"Failed to get cropped audio: {str(e)}"}, indent=2) + + 
+@mcp_router.get("/conversations/sse")
+async def handle_sse(request: Request):
+    """
+    Handle SSE connections with Bearer token authentication.
+
+    The access token should be provided in the Authorization header:
+    Authorization: Bearer <token>
+
+    Note: For development, this bypasses user authentication and returns all conversations.
+    In the future, this will validate speaker identity from conversations.
+    """
+    from fastapi.responses import JSONResponse
+
+    # Extract access token from Authorization header
+    auth_header = request.headers.get("authorization")
+    if not auth_header:
+        logger.error("No Authorization header provided")
+        return JSONResponse(
+            status_code=401,
+            content={"error": "Authorization header required. Use: Authorization: Bearer <token>"}
+        )
+
+    # Parse Bearer token
+    parts = auth_header.split()
+    if len(parts) != 2 or parts[0].lower() != "bearer":
+        logger.error(f"Invalid Authorization header format: {auth_header}")
+        return JSONResponse(
+            status_code=401,
+            content={"error": "Invalid Authorization header. 
Use format: Authorization: Bearer <token>"}
+        )
+
+    access_token = parts[1]
+    if not access_token:
+        logger.error("Empty access token")
+        return JSONResponse(
+            status_code=401,
+            content={"error": "Access token cannot be empty"}
+        )
+
+    # For now, use "all" as the user_id to bypass filtering
+    # This will be replaced with speaker-based permissions later
+    logger.info(f"MCP connection established with access token: {access_token[:min(8, len(access_token))]}...")
+    user_token = user_id_var.set("all")
+
+    try:
+        # Handle SSE connection
+        async with sse.connect_sse(
+            request.scope,
+            request.receive,
+            request._send,
+        ) as (read_stream, write_stream):
+            await mcp._mcp_server.run(
+                read_stream,
+                write_stream,
+                mcp._mcp_server.create_initialization_options(),
+            )
+    finally:
+        # Clean up context variables
+        user_id_var.reset(user_token)
+
+
+@mcp_router.post("/messages/")
+async def handle_get_message(request: Request):
+    return await handle_post_message(request)
+
+
+@mcp_router.post("/conversations/sse/{user_id}/messages/")
+async def handle_post_message_with_user(request: Request):
+    return await handle_post_message(request)
+
+
+async def handle_post_message(request: Request):
+    """Handle POST messages for SSE"""
+    try:
+        body = await request.body()
+
+        # Create a simple receive function that returns the body
+        async def receive():
+            return {"type": "http.request", "body": body, "more_body": False}
+
+        # Create a simple send function that does nothing
+        async def send(message):
+            return {}
+
+        # Call handle_post_message with the correct arguments
+        await sse.handle_post_message(request.scope, receive, send)
+
+        # Return a success response
+        return {"status": "ok"}
+    finally:
+        pass
+
+
+def setup_mcp_server(app: FastAPI):
+    """Setup MCP server with the FastAPI application"""
+    mcp._mcp_server.name = "friend-lite-conversations"
+
+    # Include MCP router in the FastAPI app
+    app.include_router(mcp_router)
+
+    logger.info("Friend-Lite MCP server initialized with 
conversation tools") diff --git a/backends/advanced/src/advanced_omi_backend/settings_manager.py b/backends/advanced/src/advanced_omi_backend/settings_manager.py new file mode 100644 index 00000000..67873334 --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/settings_manager.py @@ -0,0 +1,485 @@ +""" +Dynamic settings manager with MongoDB storage and caching. + +Settings are loaded from environment variables on first initialization, +then stored in MongoDB. Subsequent loads use MongoDB as the source of truth. +Changes take effect within the cache TTL (default: 5 seconds). +""" + +import logging +import os +import time +from typing import Dict, Any, Optional, TypeVar, Type + +from motor.motor_asyncio import AsyncIOMotorDatabase + +from advanced_omi_backend.settings_models import ( + AllSettings, + ApiKeysSettings, + AudioProcessingSettings, + ConversationSettings, + DiarizationSettings, + InfrastructureSettings, + LLMSettings, + MiscSettings, + NetworkSettings, + ProviderSettings, + SpeechDetectionSettings, + TranscriptionProvider, +) + +logger = logging.getLogger(__name__) + +T = TypeVar('T') + + +class SettingsManager: + """ + Manages dynamic application settings with MongoDB storage and caching. + + Settings are stored in the 'application_settings' collection with documents: + { + "_id": "speech_detection", # Setting category + "values": {...}, # Pydantic model dict + "updated_at": datetime, + "updated_by": "user_id or 'system'" + } + """ + + def __init__(self, db: AsyncIOMotorDatabase, cache_ttl: int = 5): + """ + Initialize settings manager. 
+ + Args: + db: MongoDB database instance + cache_ttl: Cache TTL in seconds (default: 5) + """ + self.db = db + self.settings_col = db["application_settings"] + self.cache_ttl = cache_ttl + + # Cache storage + self._cache: Dict[str, Any] = {} + self._cache_time: Dict[str, float] = {} + + # Initialization flag + self._initialized = False + + async def initialize(self): + """ + Initialize settings from environment variables if not already in MongoDB. + + This is called once on application startup to migrate existing env vars + to the database. + """ + if self._initialized: + return + + logger.info("Initializing settings manager...") + + # Check if settings already exist in DB + count = await self.settings_col.count_documents({}) + + if count == 0: + # First time setup - load from env vars + logger.info("No settings found in database, initializing from environment variables") + await self._initialize_from_env() + else: + logger.info(f"Found {count} setting categories in database") + + self._initialized = True + + async def _initialize_from_env(self): + """Initialize all settings from environment variables.""" + + # Speech detection + speech_detection = SpeechDetectionSettings( + min_words=int(os.getenv("SPEECH_DETECTION_MIN_WORDS", "5")), + min_confidence=float(os.getenv("SPEECH_DETECTION_MIN_CONFIDENCE", "0.5")), + min_duration=float(os.getenv("SPEECH_DETECTION_MIN_DURATION", "10.0")), + ) + await self._save_to_db("speech_detection", speech_detection.dict(), "system") + + # Conversation settings + conversation = ConversationSettings( + transcription_buffer_seconds=float(os.getenv("TRANSCRIPTION_BUFFER_SECONDS", "120")), + speech_inactivity_threshold=float(os.getenv("SPEECH_INACTIVITY_THRESHOLD_SECONDS", "60")), + new_conversation_timeout_minutes=float(os.getenv("NEW_CONVERSATION_TIMEOUT_MINUTES", "1.5")), + record_only_enrolled_speakers=os.getenv("RECORD_ONLY_ENROLLED_SPEAKERS", "true").lower() == "true", + ) + await self._save_to_db("conversation", 
conversation.dict(), "system") + + # Audio processing + audio_processing = AudioProcessingSettings( + audio_cropping_enabled=os.getenv("AUDIO_CROPPING_ENABLED", "true").lower() == "true", + min_speech_segment_duration=float(os.getenv("MIN_SPEECH_SEGMENT_DURATION", "1.0")), + cropping_context_padding=float(os.getenv("CROPPING_CONTEXT_PADDING", "0.1")), + ) + await self._save_to_db("audio_processing", audio_processing.dict(), "system") + + # Diarization (load from existing config or defaults) + from advanced_omi_backend.config import _diarization_settings + if _diarization_settings: + diarization = DiarizationSettings(**_diarization_settings) + else: + diarization = DiarizationSettings() + await self._save_to_db("diarization", diarization.dict(), "system") + + # LLM settings + llm = LLMSettings( + llm_provider=os.getenv("LLM_PROVIDER", "openai"), + openai_model=os.getenv("OPENAI_MODEL", "gpt-4o-mini"), + chat_llm_model=os.getenv("CHAT_LLM_MODEL"), + chat_temperature=float(os.getenv("CHAT_TEMPERATURE", "0.7")), + ollama_model=os.getenv("OLLAMA_MODEL", "llama3.1:latest"), + ollama_embedder_model=os.getenv("OLLAMA_EMBEDDER_MODEL", "nomic-embed-text:latest"), + ) + await self._save_to_db("llm", llm.dict(), "system") + + # Provider settings + transcription_provider = os.getenv("TRANSCRIPTION_PROVIDER", "auto") + # Map empty string to "auto" + if not transcription_provider: + transcription_provider = "auto" + + providers = ProviderSettings( + memory_provider=os.getenv("MEMORY_PROVIDER", "chronicle"), + transcription_provider=transcription_provider, + ) + await self._save_to_db("providers", providers.dict(), "system") + + # Network settings + network = NetworkSettings( + host_ip=os.getenv("HOST_IP", "localhost"), + backend_public_port=int(os.getenv("BACKEND_PUBLIC_PORT", "8000")), + webui_port=int(os.getenv("WEBUI_PORT", "5173")), + cors_origins=os.getenv("CORS_ORIGINS", "http://localhost:5173,http://localhost:3000"), + ) + await self._save_to_db("network", network.dict(), 
"system") + + # Infrastructure settings + from advanced_omi_backend.app_config import get_app_config + config = get_app_config() + infrastructure = InfrastructureSettings( + mongodb_uri=config.mongodb_uri, + mongodb_database=config.mongodb_database, + redis_url=config.redis_url, + qdrant_base_url=config.qdrant_base_url, + qdrant_port=config.qdrant_port, + neo4j_host=os.getenv("NEO4J_HOST", "neo4j-mem0"), + neo4j_user=os.getenv("NEO4J_USER", "neo4j"), + ) + await self._save_to_db("infrastructure", infrastructure.dict(), "system") + + # Misc settings + misc = MiscSettings( + debug_dir=os.getenv("DEBUG_DIR", "./data/debug_dir"), + langfuse_enable_telemetry=os.getenv("LANGFUSE_ENABLE_TELEMETRY", "false").lower() == "true", + ) + await self._save_to_db("misc", misc.dict(), "system") + + # API Keys settings - read from .env.api-keys file first, fallback to env vars + from advanced_omi_backend.utils.api_keys_manager import read_api_keys_from_file + + file_keys = read_api_keys_from_file(".env.api-keys") + api_keys = ApiKeysSettings( + openai_api_key=file_keys.get("openai_api_key") or os.getenv("OPENAI_API_KEY"), + deepgram_api_key=file_keys.get("deepgram_api_key") or os.getenv("DEEPGRAM_API_KEY"), + mistral_api_key=file_keys.get("mistral_api_key") or os.getenv("MISTRAL_API_KEY"), + hf_token=file_keys.get("hf_token") or os.getenv("HF_TOKEN"), + langfuse_public_key=file_keys.get("langfuse_public_key") or os.getenv("LANGFUSE_PUBLIC_KEY"), + langfuse_secret_key=file_keys.get("langfuse_secret_key") or os.getenv("LANGFUSE_SECRET_KEY"), + ngrok_authtoken=file_keys.get("ngrok_authtoken") or os.getenv("NGROK_AUTHTOKEN"), + ) + await self._save_to_db("api_keys", api_keys.dict(), "system") + + logger.info("✅ Initialized all settings from environment variables") + + async def _get_from_cache_or_db( + self, + key: str, + model_class: Type[T], + ) -> T: + """ + Get settings from cache or database. 
+ + Args: + key: Settings category key + model_class: Pydantic model class + + Returns: + Instance of model_class with current settings + """ + # Check cache freshness + if key in self._cache: + age = time.time() - self._cache_time.get(key, 0) + if age < self.cache_ttl: + return self._cache[key] + + # Load from DB + doc = await self.settings_col.find_one({"_id": key}) + + if doc and "values" in doc: + settings = model_class(**doc["values"]) + else: + # Use defaults if not found + logger.warning(f"Settings '{key}' not found in database, using defaults") + settings = model_class() + + # Update cache + self._cache[key] = settings + self._cache_time[key] = time.time() + + return settings + + async def _save_to_db(self, key: str, values: dict, updated_by: str = "user"): + """ + Save settings to database. + + Args: + key: Settings category key + values: Settings values as dict + updated_by: User ID or 'system' + """ + from datetime import datetime + + await self.settings_col.update_one( + {"_id": key}, + { + "$set": { + "values": values, + "updated_at": datetime.utcnow(), + "updated_by": updated_by, + } + }, + upsert=True, + ) + + async def _update_settings( + self, + key: str, + settings: T, + updated_by: str = "user", + ): + """ + Update settings in database and cache. 
+ + Args: + key: Settings category key + settings: Pydantic model instance + updated_by: User ID or 'system' + """ + # Save to DB + await self._save_to_db(key, settings.dict(), updated_by) + + # Update cache immediately + self._cache[key] = settings + self._cache_time[key] = time.time() + + logger.info(f"Updated settings '{key}' (by: {updated_by})") + + # Speech Detection Settings + + async def get_speech_detection(self) -> SpeechDetectionSettings: + """Get speech detection settings.""" + return await self._get_from_cache_or_db("speech_detection", SpeechDetectionSettings) + + async def update_speech_detection( + self, + settings: SpeechDetectionSettings, + updated_by: str = "user", + ): + """Update speech detection settings.""" + await self._update_settings("speech_detection", settings, updated_by) + + # Conversation Settings + + async def get_conversation(self) -> ConversationSettings: + """Get conversation management settings.""" + return await self._get_from_cache_or_db("conversation", ConversationSettings) + + async def update_conversation( + self, + settings: ConversationSettings, + updated_by: str = "user", + ): + """Update conversation management settings.""" + await self._update_settings("conversation", settings, updated_by) + + # Audio Processing Settings + + async def get_audio_processing(self) -> AudioProcessingSettings: + """Get audio processing settings.""" + return await self._get_from_cache_or_db("audio_processing", AudioProcessingSettings) + + async def update_audio_processing( + self, + settings: AudioProcessingSettings, + updated_by: str = "user", + ): + """Update audio processing settings.""" + await self._update_settings("audio_processing", settings, updated_by) + + # Diarization Settings + + async def get_diarization(self) -> DiarizationSettings: + """Get diarization settings.""" + return await self._get_from_cache_or_db("diarization", DiarizationSettings) + + async def update_diarization( + self, + settings: DiarizationSettings, + updated_by: 
str = "user", + ): + """Update diarization settings.""" + await self._update_settings("diarization", settings, updated_by) + + # LLM Settings + + async def get_llm(self) -> LLMSettings: + """Get LLM settings.""" + return await self._get_from_cache_or_db("llm", LLMSettings) + + async def update_llm( + self, + settings: LLMSettings, + updated_by: str = "user", + ): + """Update LLM settings.""" + await self._update_settings("llm", settings, updated_by) + + # Provider Settings + + async def get_providers(self) -> ProviderSettings: + """Get provider settings.""" + return await self._get_from_cache_or_db("providers", ProviderSettings) + + async def update_providers( + self, + settings: ProviderSettings, + updated_by: str = "user", + ): + """Update provider settings.""" + await self._update_settings("providers", settings, updated_by) + + # Network Settings + + async def get_network(self) -> NetworkSettings: + """Get network settings.""" + return await self._get_from_cache_or_db("network", NetworkSettings) + + async def update_network( + self, + settings: NetworkSettings, + updated_by: str = "user", + ): + """Update network settings.""" + await self._update_settings("network", settings, updated_by) + + # Infrastructure Settings + + async def get_infrastructure(self) -> InfrastructureSettings: + """Get infrastructure settings.""" + return await self._get_from_cache_or_db("infrastructure", InfrastructureSettings) + + async def update_infrastructure( + self, + settings: InfrastructureSettings, + updated_by: str = "user", + ): + """Update infrastructure settings.""" + await self._update_settings("infrastructure", settings, updated_by) + + # Misc Settings + + async def get_misc(self) -> MiscSettings: + """Get miscellaneous settings.""" + return await self._get_from_cache_or_db("misc", MiscSettings) + + async def update_misc( + self, + settings: MiscSettings, + updated_by: str = "user", + ): + """Update miscellaneous settings.""" + await self._update_settings("misc", settings, 
updated_by) + + # API Keys Settings + + async def get_api_keys(self) -> ApiKeysSettings: + """Get API keys settings.""" + return await self._get_from_cache_or_db("api_keys", ApiKeysSettings) + + async def update_api_keys( + self, + settings: ApiKeysSettings, + updated_by: str = "user", + ): + """Update API keys settings.""" + await self._update_settings("api_keys", settings, updated_by) + + # Combined Settings + + async def get_all_settings(self) -> AllSettings: + """Get all settings combined.""" + return AllSettings( + speech_detection=await self.get_speech_detection(), + conversation=await self.get_conversation(), + audio_processing=await self.get_audio_processing(), + diarization=await self.get_diarization(), + llm=await self.get_llm(), + providers=await self.get_providers(), + network=await self.get_network(), + infrastructure=await self.get_infrastructure(), + misc=await self.get_misc(), + api_keys=await self.get_api_keys(), + ) + + async def update_all_settings( + self, + settings: AllSettings, + updated_by: str = "user", + ): + """Update all settings at once.""" + await self.update_speech_detection(settings.speech_detection, updated_by) + await self.update_conversation(settings.conversation, updated_by) + await self.update_audio_processing(settings.audio_processing, updated_by) + await self.update_diarization(settings.diarization, updated_by) + await self.update_llm(settings.llm, updated_by) + await self.update_providers(settings.providers, updated_by) + await self.update_network(settings.network, updated_by) + await self.update_infrastructure(settings.infrastructure, updated_by) + await self.update_misc(settings.misc, updated_by) + await self.update_api_keys(settings.api_keys, updated_by) + + def invalidate_cache(self, key: Optional[str] = None): + """ + Force settings to reload from database on next access. 
+ + Args: + key: Specific settings category to invalidate, or None for all + """ + if key: + self._cache_time[key] = 0 + logger.info(f"Invalidated cache for '{key}'") + else: + self._cache_time.clear() + logger.info("Invalidated all settings cache") + + +# Global settings manager instance (initialized in main.py) +_settings_manager: Optional[SettingsManager] = None + + +def init_settings_manager(db: AsyncIOMotorDatabase): + """Initialize the global settings manager.""" + global _settings_manager + _settings_manager = SettingsManager(db) + return _settings_manager + + +def get_settings_manager() -> SettingsManager: + """Get the global settings manager instance.""" + if _settings_manager is None: + raise RuntimeError("Settings manager not initialized. Call init_settings_manager() first.") + return _settings_manager diff --git a/backends/advanced/src/advanced_omi_backend/settings_models.py b/backends/advanced/src/advanced_omi_backend/settings_models.py new file mode 100644 index 00000000..68742f1e --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/settings_models.py @@ -0,0 +1,320 @@ +""" +Pydantic models for dynamic application settings. + +These settings can be changed by users through the UI and take effect +without requiring a server restart (within the cache TTL). 
+""" + +from enum import Enum +from typing import Optional + +from pydantic import BaseModel, Field, validator + + +class LLMProvider(str, Enum): + """Supported LLM providers.""" + OPENAI = "openai" + OLLAMA = "ollama" + + +class MemoryProvider(str, Enum): + """Supported memory providers.""" + CHRONICLE = "chronicle" + OPENMEMORY_MCP = "openmemory_mcp" + MYCELIA = "mycelia" + + +class TranscriptionProvider(str, Enum): + """Supported transcription providers.""" + DEEPGRAM = "deepgram" + MISTRAL = "mistral" + PARAKEET = "parakeet" + AUTO = "auto" + + +class DiarizationSource(str, Enum): + """Supported diarization sources.""" + PYANNOTE = "pyannote" + DEEPGRAM = "deepgram" + + +class SpeechDetectionSettings(BaseModel): + """Speech detection settings for conversation creation.""" + + min_words: int = Field( + default=5, + ge=1, + le=100, + description="Minimum words required to create a conversation" + ) + min_confidence: float = Field( + default=0.5, + ge=0.0, + le=1.0, + description="Minimum word confidence threshold (0.0-1.0)" + ) + min_duration: float = Field( + default=10.0, + ge=0.0, + description="Minimum speech duration in seconds" + ) + + +class ConversationSettings(BaseModel): + """Conversation management settings.""" + + transcription_buffer_seconds: float = Field( + default=120.0, + ge=10.0, + le=600.0, + description="Trigger transcription every N seconds" + ) + speech_inactivity_threshold: float = Field( + default=60.0, + ge=10.0, + le=600.0, + description="Close conversation after N seconds of no speech" + ) + new_conversation_timeout_minutes: float = Field( + default=1.5, + ge=0.1, + le=60.0, + description="Timeout for creating new conversations (minutes)" + ) + record_only_enrolled_speakers: bool = Field( + default=True, + description="Only create conversations when enrolled speakers are detected" + ) + + +class AudioProcessingSettings(BaseModel): + """Audio processing settings.""" + + audio_cropping_enabled: bool = Field( + default=True, + 
description="Enable automatic silence removal from audio" + ) + min_speech_segment_duration: float = Field( + default=1.0, + ge=0.1, + le=10.0, + description="Minimum speech segment duration in seconds" + ) + cropping_context_padding: float = Field( + default=0.1, + ge=0.0, + le=1.0, + description="Context padding around speech segments" + ) + + +class DiarizationSettings(BaseModel): + """Speaker diarization settings.""" + + diarization_source: DiarizationSource = Field( + default=DiarizationSource.PYANNOTE, + description="Diarization service to use" + ) + similarity_threshold: float = Field( + default=0.15, + ge=0.0, + le=1.0, + description="Speaker similarity threshold" + ) + min_duration: float = Field( + default=0.5, + ge=0.0, + description="Minimum segment duration" + ) + collar: float = Field( + default=2.0, + ge=0.0, + description="Collar for segment merging (seconds)" + ) + min_duration_off: float = Field( + default=1.5, + ge=0.0, + description="Minimum silence duration between segments" + ) + min_speakers: int = Field( + default=2, + ge=1, + le=10, + description="Minimum number of speakers" + ) + max_speakers: int = Field( + default=6, + ge=1, + le=20, + description="Maximum number of speakers" + ) + + @validator('max_speakers') + def validate_max_speakers(cls, v, values): + """Ensure max_speakers >= min_speakers.""" + if 'min_speakers' in values and v < values['min_speakers']: + raise ValueError('max_speakers must be >= min_speakers') + return v + + +class LLMSettings(BaseModel): + """LLM provider and model settings.""" + + llm_provider: LLMProvider = Field( + default=LLMProvider.OPENAI, + description="LLM provider to use" + ) + openai_model: str = Field( + default="gpt-4o-mini", + description="OpenAI model for general tasks" + ) + chat_llm_model: Optional[str] = Field( + default=None, + description="Model for chat (defaults to openai_model if not set)" + ) + chat_temperature: float = Field( + default=0.7, + ge=0.0, + le=2.0, + description="Temperature 
for chat responses" + ) + ollama_model: Optional[str] = Field( + default="llama3.1:latest", + description="Ollama model name" + ) + ollama_embedder_model: Optional[str] = Field( + default="nomic-embed-text:latest", + description="Ollama embedder model name" + ) + + +class ProviderSettings(BaseModel): + """Service provider selection settings.""" + + memory_provider: MemoryProvider = Field( + default=MemoryProvider.CHRONICLE, + description="Memory provider to use" + ) + transcription_provider: TranscriptionProvider = Field( + default=TranscriptionProvider.AUTO, + description="Transcription provider (auto-selects if 'auto')" + ) + + +class NetworkSettings(BaseModel): + """Network and public access settings.""" + + host_ip: str = Field( + default="localhost", + description="Public IP/hostname for browser access" + ) + backend_public_port: int = Field( + default=8000, + ge=1, + le=65535, + description="Backend API public port" + ) + webui_port: int = Field( + default=5173, + ge=1, + le=65535, + description="WebUI port" + ) + cors_origins: str = Field( + default="http://localhost:5173,http://localhost:3000,http://127.0.0.1:5173,http://127.0.0.1:3000", + description="Comma-separated list of CORS origins" + ) + + +class InfrastructureSettings(BaseModel): + """Core infrastructure service settings.""" + + mongodb_uri: str = Field( + default="mongodb://mongo:27017", + description="MongoDB connection URI" + ) + mongodb_database: str = Field( + default="friend-lite", + description="MongoDB database name" + ) + redis_url: str = Field( + default="redis://localhost:6379/0", + description="Redis connection URL" + ) + qdrant_base_url: str = Field( + default="qdrant", + description="Qdrant base URL/hostname" + ) + qdrant_port: str = Field( + default="6333", + description="Qdrant port" + ) + neo4j_host: str = Field( + default="neo4j-mem0", + description="Neo4j host" + ) + neo4j_user: str = Field( + default="neo4j", + description="Neo4j username" + ) + + +class MiscSettings(BaseModel): 
+ """Miscellaneous settings.""" + + debug_dir: str = Field( + default="./data/debug_dir", + description="Directory for debug files" + ) + langfuse_enable_telemetry: bool = Field( + default=False, + description="Enable Langfuse telemetry" + ) + + +class ApiKeysSettings(BaseModel): + """External service API keys.""" + + openai_api_key: Optional[str] = Field( + default=None, + description="OpenAI API Key" + ) + deepgram_api_key: Optional[str] = Field( + default=None, + description="Deepgram API Key" + ) + mistral_api_key: Optional[str] = Field( + default=None, + description="Mistral API Key" + ) + hf_token: Optional[str] = Field( + default=None, + description="HuggingFace Token" + ) + langfuse_public_key: Optional[str] = Field( + default=None, + description="Langfuse Public Key" + ) + langfuse_secret_key: Optional[str] = Field( + default=None, + description="Langfuse Secret Key" + ) + ngrok_authtoken: Optional[str] = Field( + default=None, + description="Ngrok Auth Token" + ) + + +class AllSettings(BaseModel): + """Combined model for all application settings.""" + + speech_detection: SpeechDetectionSettings = Field(default_factory=SpeechDetectionSettings) + conversation: ConversationSettings = Field(default_factory=ConversationSettings) + audio_processing: AudioProcessingSettings = Field(default_factory=AudioProcessingSettings) + diarization: DiarizationSettings = Field(default_factory=DiarizationSettings) + llm: LLMSettings = Field(default_factory=LLMSettings) + providers: ProviderSettings = Field(default_factory=ProviderSettings) + network: NetworkSettings = Field(default_factory=NetworkSettings) + infrastructure: InfrastructureSettings = Field(default_factory=InfrastructureSettings) + misc: MiscSettings = Field(default_factory=MiscSettings) + api_keys: ApiKeysSettings = Field(default_factory=ApiKeysSettings) diff --git a/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py b/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py new 
file mode 100644 index 00000000..1eca417d --- /dev/null +++ b/backends/advanced/src/advanced_omi_backend/utils/api_keys_manager.py @@ -0,0 +1,168 @@ +""" +API Keys Manager - Handle reading/writing API keys from file and database. +""" + +import logging +import os +from pathlib import Path +from typing import Dict, Optional + +logger = logging.getLogger(__name__) + + +def mask_api_key(key: Optional[str]) -> Optional[str]: + """ + Mask an API key for display purposes. + + Shows first 7 chars and last 4 chars, masks the middle. + Example: sk-1234567890abcdef -> sk-1234***cdef + """ + if not key or len(key) < 12: + return None + + return f"{key[:7]}****{key[-4:]}" + + +def read_api_keys_from_file(file_path: str = ".env.api-keys") -> Dict[str, Optional[str]]: + """ + Read API keys from .env.api-keys file. + + Returns: + Dictionary of API key values (not masked) + """ + keys = { + "openai_api_key": None, + "deepgram_api_key": None, + "mistral_api_key": None, + "hf_token": None, + "langfuse_public_key": None, + "langfuse_secret_key": None, + "ngrok_authtoken": None, + } + + # Check if file exists + if not os.path.exists(file_path): + logger.warning(f"API keys file not found: {file_path}") + return keys + + try: + with open(file_path, 'r') as f: + for line in f: + line = line.strip() + # Skip comments and empty lines + if not line or line.startswith('#'): + continue + + # Parse key=value + if '=' in line: + key, value = line.split('=', 1) + key = key.strip() + value = value.strip() + + # Map env var names to our field names + if key == "OPENAI_API_KEY" and value: + keys["openai_api_key"] = value + elif key == "DEEPGRAM_API_KEY" and value: + keys["deepgram_api_key"] = value + elif key == "MISTRAL_API_KEY" and value: + keys["mistral_api_key"] = value + elif key == "HF_TOKEN" and value: + keys["hf_token"] = value + elif key == "LANGFUSE_PUBLIC_KEY" and value: + keys["langfuse_public_key"] = value + elif key == "LANGFUSE_SECRET_KEY" and value: + keys["langfuse_secret_key"] = 
value + elif key == "NGROK_AUTHTOKEN" and value: + keys["ngrok_authtoken"] = value + + logger.info(f"Loaded API keys from {file_path}") + return keys + + except Exception as e: + logger.error(f"Error reading API keys file: {e}") + return keys + + +def write_api_keys_to_file(keys: Dict[str, Optional[str]], file_path: str = ".env.api-keys") -> bool: + """ + Write API keys to .env.api-keys file. + + Args: + keys: Dictionary of API key values + file_path: Path to the .env.api-keys file + + Returns: + True if successful, False otherwise + """ + try: + # Read template for structure/comments + template_path = f"{file_path}.template" + template_lines = [] + + if os.path.exists(template_path): + with open(template_path, 'r') as f: + template_lines = f.readlines() + + # Build output content + output_lines = [] + + if template_lines: + # Use template structure + for line in template_lines: + stripped = line.strip() + + # Keep comments and empty lines + if not stripped or stripped.startswith('#'): + output_lines.append(line) + continue + + # Parse key=value from template + if '=' in stripped: + key_name = stripped.split('=', 1)[0].strip() + + # Replace with actual values if provided + if key_name == "OPENAI_API_KEY": + value = keys.get("openai_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "DEEPGRAM_API_KEY": + value = keys.get("deepgram_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "MISTRAL_API_KEY": + value = keys.get("mistral_api_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "HF_TOKEN": + value = keys.get("hf_token", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "LANGFUSE_PUBLIC_KEY": + value = keys.get("langfuse_public_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "LANGFUSE_SECRET_KEY": + value = keys.get("langfuse_secret_key", "") + output_lines.append(f"{key_name}={value}\n") + elif key_name == "NGROK_AUTHTOKEN": + value = 
keys.get("ngrok_authtoken", "") + output_lines.append(f"{key_name}={value}\n") + else: + # Keep other keys from template unchanged + output_lines.append(line) + else: + # No template - create simple format + output_lines.append("# API Keys\n\n") + output_lines.append(f"OPENAI_API_KEY={keys.get('openai_api_key', '')}\n") + output_lines.append(f"DEEPGRAM_API_KEY={keys.get('deepgram_api_key', '')}\n") + output_lines.append(f"MISTRAL_API_KEY={keys.get('mistral_api_key', '')}\n") + output_lines.append(f"HF_TOKEN={keys.get('hf_token', '')}\n") + output_lines.append(f"LANGFUSE_PUBLIC_KEY={keys.get('langfuse_public_key', '')}\n") + output_lines.append(f"LANGFUSE_SECRET_KEY={keys.get('langfuse_secret_key', '')}\n") + output_lines.append(f"NGROK_AUTHTOKEN={keys.get('ngrok_authtoken', '')}\n") + + # Write to file + with open(file_path, 'w') as f: + f.writelines(output_lines) + + logger.info(f"Wrote API keys to {file_path}") + return True + + except Exception as e: + logger.error(f"Error writing API keys file: {e}") + return False diff --git a/backends/advanced/webui/package-lock.json b/backends/advanced/webui/package-lock.json index ead72812..1090d0bb 100644 --- a/backends/advanced/webui/package-lock.json +++ b/backends/advanced/webui/package-lock.json @@ -32,7 +32,7 @@ "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.5", "postcss": "^8.4.32", - "sass-embedded": "^1.83.0", + "sass-embedded": "^1.80.7", "tailwindcss": "^3.3.0", "typescript": "^5.2.2", "vite": "^5.0.8" diff --git a/backends/advanced/webui/package.json b/backends/advanced/webui/package.json index b933d8db..250df867 100644 --- a/backends/advanced/webui/package.json +++ b/backends/advanced/webui/package.json @@ -34,7 +34,7 @@ "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.5", "postcss": "^8.4.32", - "sass-embedded": "^1.83.0", + "sass-embedded": "^1.80.7", "tailwindcss": "^3.3.0", "typescript": "^5.2.2", "vite": "^5.0.8" diff --git 
a/backends/advanced/webui/src/App.tsx b/backends/advanced/webui/src/App.tsx index fca59623..4c9add41 100644 --- a/backends/advanced/webui/src/App.tsx +++ b/backends/advanced/webui/src/App.tsx @@ -13,6 +13,7 @@ import System from './pages/System' import Upload from './pages/Upload' import Queue from './pages/Queue' import LiveRecord from './pages/LiveRecord' +import Settings from './pages/Settings' import ProtectedRoute from './components/auth/ProtectedRoute' import { ErrorBoundary, PageErrorBoundary } from './components/ErrorBoundary' @@ -89,6 +90,11 @@ function App() { } /> + + + + } /> diff --git a/backends/advanced/webui/src/components/layout/Layout.tsx b/backends/advanced/webui/src/components/layout/Layout.tsx index 5995f823..83a161ab 100644 --- a/backends/advanced/webui/src/components/layout/Layout.tsx +++ b/backends/advanced/webui/src/components/layout/Layout.tsx @@ -15,10 +15,11 @@ export default function Layout() { { path: '/memories', label: 'Memories', icon: Brain }, { path: '/timeline', label: 'Timeline', icon: Calendar }, { path: '/users', label: 'User Management', icon: Users }, + { path: '/settings', label: 'Settings', icon: Settings }, ...(isAdmin ? 
[ { path: '/upload', label: 'Upload Audio', icon: Upload }, { path: '/queue', label: 'Queue Management', icon: Layers }, - { path: '/system', label: 'System State', icon: Settings }, + { path: '/system', label: 'System State', icon: Shield }, ] : []), ] diff --git a/backends/advanced/webui/src/contexts/AuthContext.tsx b/backends/advanced/webui/src/contexts/AuthContext.tsx index 7745e871..97a5b42c 100644 --- a/backends/advanced/webui/src/contexts/AuthContext.tsx +++ b/backends/advanced/webui/src/contexts/AuthContext.tsx @@ -7,6 +7,8 @@ interface User { name: string email: string is_superuser: boolean + api_key?: string + api_key_created_at?: string } interface AuthContextType { diff --git a/backends/advanced/webui/src/pages/Settings.tsx b/backends/advanced/webui/src/pages/Settings.tsx new file mode 100644 index 00000000..bd3556d9 --- /dev/null +++ b/backends/advanced/webui/src/pages/Settings.tsx @@ -0,0 +1,1612 @@ +import { useState, useEffect } from 'react' +import { + Settings as SettingsIcon, + Key, + Copy, + Trash2, + RefreshCw, + CheckCircle, + AlertCircle, + Save, + Server, + MessageSquare, + Mic, + Database, + Shield, + Brain, + Eye, + EyeOff, +} from 'lucide-react' +import { useAuth } from '../contexts/AuthContext' +import { settingsApi } from '../services/api' + +type Tab = 'core-infra' | 'api-keys' | 'mcp-key' | 'memory' | 'llm' | 'speech' | 'conversations' + +interface Message { + type: 'success' | 'error' + text: string +} + +export default function Settings() { + const { user } = useAuth() + const [activeTab, setActiveTab] = useState('core-infra') + + // MCP Key state + const [apiKey, setApiKey] = useState(null) + const [apiKeyCreatedAt, setApiKeyCreatedAt] = useState(null) + const [loading, setLoading] = useState(false) + const [copied, setCopied] = useState(false) + + // Infrastructure status state + const [infraStatus, setInfraStatus] = useState(null) + const [infraLoading, setInfraLoading] = useState(false) + + // Infrastructure settings state + 
const [infraSettings, setInfraSettings] = useState(null) + const [infraSettingsOriginal, setInfraSettingsOriginal] = useState(null) + const [infraSettingsLoading, setInfraSettingsLoading] = useState(false) + const [infraSettingsSaving, setInfraSettingsSaving] = useState(false) + + // API Keys settings state + const [apiKeysSettings, setApiKeysSettings] = useState(null) + const [apiKeysSettingsOriginal, setApiKeysSettingsOriginal] = useState(null) + const [apiKeysSettingsLoading, setApiKeysSettingsLoading] = useState(false) + const [apiKeysSettingsSaving, setApiKeysSettingsSaving] = useState(false) + + // API Keys visibility state + const [showApiKeys, setShowApiKeys] = useState>({ + openai_api_key: false, + deepgram_api_key: false, + mistral_api_key: false, + hf_token: false, + langfuse_public_key: false, + langfuse_secret_key: false, + ngrok_authtoken: false, + }) + + // API Keys save options + const [saveToFile, setSaveToFile] = useState(true) + const [saveToDatabase, setSaveToDatabase] = useState(true) + const [apiKeysFilePath, setApiKeysFilePath] = useState('.env.api-keys') + const [loadingFromFile, setLoadingFromFile] = useState(false) + + // Application settings state + const [appSettings, setAppSettings] = useState(null) + const [appSettingsLoading, setAppSettingsLoading] = useState(false) + + const [message, setMessage] = useState(null) + + useEffect(() => { + loadApiKeyInfo() + }, [user]) + + useEffect(() => { + if (activeTab === 'core-infra') { + if (!infraStatus) loadInfrastructureStatus() + if (!infraSettings) loadInfrastructureSettings() + if (!appSettings) loadApplicationSettings() // Load for network & misc settings + } else if (activeTab === 'api-keys') { + if (!apiKeysSettings) loadApiKeysSettings() + } else if (['memory', 'llm', 'speech', 'conversations'].includes(activeTab) && !appSettings) { + loadApplicationSettings() + } + }, [activeTab]) + + const loadApiKeyInfo = () => { + if (user?.api_key) { + setApiKey(user.api_key) + 
setApiKeyCreatedAt(user.api_key_created_at || null) + } + } + + const loadInfrastructureStatus = async () => { + try { + setInfraLoading(true) + const response = await settingsApi.getInfrastructureStatus() + setInfraStatus(response.data) + } catch (error: any) { + console.error('Failed to load infrastructure status:', error) + showMessage('error', 'Failed to load infrastructure status') + } finally { + setInfraLoading(false) + } + } + + const loadInfrastructureSettings = async () => { + try { + setInfraSettingsLoading(true) + const response = await settingsApi.getInfrastructure() + setInfraSettings(response.data) + setInfraSettingsOriginal(response.data) + } catch (error: any) { + console.error('Failed to load infrastructure settings:', error) + showMessage('error', 'Failed to load infrastructure settings') + } finally { + setInfraSettingsLoading(false) + } + } + + const saveInfrastructureSettings = async () => { + try { + setInfraSettingsSaving(true) + await settingsApi.updateInfrastructure(infraSettings) + setInfraSettingsOriginal(infraSettings) + showMessage('success', 'Infrastructure settings saved successfully') + // Reload status to reflect new settings + loadInfrastructureStatus() + } catch (error: any) { + console.error('Failed to save infrastructure settings:', error) + showMessage('error', error.response?.data?.detail || 'Failed to save infrastructure settings') + } finally { + setInfraSettingsSaving(false) + } + } + + const resetInfrastructureSettings = () => { + setInfraSettings({ ...infraSettingsOriginal }) + } + + const loadApiKeysSettings = async () => { + try { + setApiKeysSettingsLoading(true) + const response = await settingsApi.getApiKeys() + setApiKeysSettings(response.data) + setApiKeysSettingsOriginal(response.data) + } catch (error: any) { + console.error('Failed to load API keys settings:', error) + showMessage('error', 'Failed to load API keys settings') + } finally { + setApiKeysSettingsLoading(false) + } + } + + const saveApiKeysSettings 
= async () => { + try { + setApiKeysSettingsSaving(true) + const response = await settingsApi.saveApiKeys(apiKeysSettings, saveToFile, saveToDatabase) + + if (response.data.success) { + setApiKeysSettingsOriginal(apiKeysSettings) + const savedTo: string[] = [] + if (response.data.saved_to.file) savedTo.push('file') + if (response.data.saved_to.database) savedTo.push('database') + showMessage('success', `API keys saved to ${savedTo.join(' and ')}`) + } else { + showMessage('error', response.data.errors.join(', ') || 'Failed to save API keys') + } + } catch (error: any) { + console.error('Failed to save API keys:', error) + showMessage('error', error.response?.data?.detail || 'Failed to save API keys') + } finally { + setApiKeysSettingsSaving(false) + } + } + + const resetApiKeysSettings = () => { + setApiKeysSettings({ ...apiKeysSettingsOriginal }) + } + + // Toggle API key visibility + const toggleApiKeyVisibility = (keyName: string) => { + setShowApiKeys(prev => ({ ...prev, [keyName]: !prev[keyName] })) + } + + // Load API keys from file + const loadApiKeysFromFile = async () => { + try { + setLoadingFromFile(true) + const response = await settingsApi.loadApiKeysFromFile(apiKeysFilePath) + + if (response.data) { + setApiKeysSettings(response.data) + setApiKeysSettingsOriginal(response.data) + showMessage('success', `API keys loaded from ${apiKeysFilePath}`) + } + } catch (error: any) { + console.error('Failed to load API keys from file:', error) + showMessage('error', error.response?.data?.detail || 'Failed to load API keys from file') + } finally { + setLoadingFromFile(false) + } + } + + const loadApplicationSettings = async () => { + try { + setAppSettingsLoading(true) + const response = await settingsApi.getAllSettings() + setAppSettings(response.data) + } catch (error: any) { + console.error('Failed to load application settings:', error) + showMessage('error', 'Failed to load application settings') + } finally { + setAppSettingsLoading(false) + } + } + + const 
generateApiKey = async () => { + try { + setLoading(true) + setMessage(null) + + const response = await settingsApi.generateApiKey() + + setApiKey(response.data.api_key) + setApiKeyCreatedAt(response.data.created_at) + showMessage('success', 'MCP API key generated successfully!') + } catch (error: any) { + console.error('Failed to generate MCP API key:', error) + showMessage('error', error.response?.data?.detail || 'Failed to generate MCP API key') + } finally { + setLoading(false) + } + } + + const revokeApiKey = async () => { + if ( + !confirm( + 'Are you sure you want to revoke your MCP API key? This will break any existing MCP client integrations.' + ) + ) { + return + } + + try { + setLoading(true) + setMessage(null) + + await settingsApi.revokeApiKey() + + setApiKey(null) + setApiKeyCreatedAt(null) + showMessage('success', 'MCP API key revoked successfully') + } catch (error: any) { + console.error('Failed to revoke MCP API key:', error) + showMessage('error', error.response?.data?.detail || 'Failed to revoke MCP API key') + } finally { + setLoading(false) + } + } + + const copyToClipboard = async () => { + if (!apiKey) return + + try { + await navigator.clipboard.writeText(apiKey) + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } catch (error) { + console.error('Failed to copy:', error) + } + } + + const showMessage = (type: 'success' | 'error', text: string) => { + setMessage({ type, text }) + setTimeout(() => setMessage(null), 3000) + } + + const formatDate = (dateString: string) => { + return new Date(dateString).toLocaleString() + } + + const updateCategorySettings = async (category: string, categorySettings: any) => { + try { + setAppSettingsLoading(true) + setMessage(null) + + const updateMethods: Record Promise> = { + speech_detection: settingsApi.updateSpeechDetection, + conversation: settingsApi.updateConversation, + audio_processing: settingsApi.updateAudioProcessing, + diarization: settingsApi.updateDiarization, + llm: 
settingsApi.updateLLM, + providers: settingsApi.updateProviders, + network: settingsApi.updateNetwork, + misc: settingsApi.updateMisc, + } + + const updateMethod = updateMethods[category] + if (!updateMethod) { + throw new Error(`Unknown category: ${category}`) + } + + await updateMethod(categorySettings) + await loadApplicationSettings() + + showMessage('success', `Settings updated successfully!`) + } catch (error: any) { + console.error(`Failed to update ${category} settings:`, error) + showMessage( + 'error', + error.response?.data?.detail || `Failed to update ${category} settings` + ) + } finally { + setAppSettingsLoading(false) + } + } + + const renderSettingsField = ( + category: string, + key: string, + value: any, + label: string, + description?: string, + type: 'number' | 'boolean' | 'text' | 'select' = 'text', + options?: { value: string; label: string }[] + ) => { + const fieldId = `${category}_${key}` + + const handleChange = (newValue: any) => { + setAppSettings((prev: any) => ({ + ...prev, + [category]: { + ...prev[category], + [key]: newValue, + }, + })) + } + + if (type === 'boolean') { + return ( +
+ handleChange(e.target.checked)} + className="mt-1 h-4 w-4 rounded border-gray-300 dark:border-gray-600 text-blue-600 focus:ring-blue-500" + /> +
+ + {description && ( +

{description}

+ )} +
+
+ ) + } + + if (type === 'select') { + return ( +
+ + {description &&

{description}

} + +
+ ) + } + + return ( +
+ + {description &&

{description}

} + handleChange(type === 'number' ? parseFloat(e.target.value) : e.target.value)} + step={type === 'number' ? 'any' : undefined} + className="block w-full rounded-md border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-700 text-gray-900 dark:text-gray-100 shadow-sm focus:border-blue-500 focus:ring-blue-500 sm:text-sm" + /> +
+ ) + } + + const tabs = [ + { id: 'core-infra' as Tab, label: 'Core Infra', icon: Server, adminOnly: false }, + { id: 'api-keys' as Tab, label: 'API Keys', icon: Shield, adminOnly: true }, + { id: 'mcp-key' as Tab, label: 'MCP Key', icon: Key, adminOnly: false }, + { id: 'memory' as Tab, label: 'Memory', icon: Database, adminOnly: true }, + { id: 'llm' as Tab, label: 'LLM', icon: Brain, adminOnly: true }, + { id: 'speech' as Tab, label: 'Speech', icon: Mic, adminOnly: true }, + { id: 'conversations' as Tab, label: 'Conversations', icon: MessageSquare, adminOnly: true }, + ] + + return ( +
+ {/* Header */} +
+ +

Settings

+
+ + {/* Message Display */} + {message && ( +
+
+ {message.type === 'success' ? ( + + ) : ( + + )} +

+ {message.text} +

+
+
+ )} + + {/* Tabs */} +
+ +
+ + {/* Tab Content */} +
+ {/* Core Infrastructure */} + {activeTab === 'core-infra' && ( +
+
+

+ Core Infrastructure +

+
+ +
+
+ + {infraSettingsLoading && !infraSettings ? ( +
+ +

Loading infrastructure settings...

+
+ ) : infraSettings ? ( +
+ {/* MongoDB */} +
+
+

MongoDB

+ {infraStatus?.mongodb && ( + + {infraStatus.mongodb.connected ? 'Connected' : 'Disconnected'} + + )} +
+
+
+ + setInfraSettings({ ...infraSettings, mongodb_uri: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="mongodb://mongo:27017" + /> +
+
+ + setInfraSettings({ ...infraSettings, mongodb_database: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="friend-lite" + /> +
+
+
+ + {/* Redis */} +
+
+

Redis

+ {infraStatus?.redis && ( + + {infraStatus.redis.connected ? 'Connected' : 'Disconnected'} + + )} +
+
+ + setInfraSettings({ ...infraSettings, redis_url: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="redis://localhost:6379/0" + /> +
+
+ + {/* Qdrant */} +
+
+

Qdrant

+ {infraStatus?.qdrant && ( + + {infraStatus.qdrant.connected ? 'Connected' : 'Disconnected'} + + )} +
+
+
+ + setInfraSettings({ ...infraSettings, qdrant_base_url: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="qdrant" + /> +
+
+ + setInfraSettings({ ...infraSettings, qdrant_port: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="6333" + /> +
+
+
+ + {/* Neo4j */} +
+
+

Neo4j

+ {infraStatus?.neo4j && ( + + {infraStatus.neo4j.connected ? 'Connected' : 'Disconnected'} + + )} +
+
+
+ + setInfraSettings({ ...infraSettings, neo4j_host: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="neo4j-mem0" + /> +
+
+ + setInfraSettings({ ...infraSettings, neo4j_user: e.target.value })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="neo4j" + /> +
+
+
+ + {/* Network Settings */} + {appSettings && ( +
+

Network & Public Access

+
+
+ + setAppSettings({ + ...appSettings, + network: { ...appSettings.network, host_ip: e.target.value } + })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="localhost" + /> +

+ Public IP or hostname for browser access +

+
+
+
+ + setAppSettings({ + ...appSettings, + network: { ...appSettings.network, backend_public_port: parseInt(e.target.value) } + })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="8000" + /> +
+
+ + setAppSettings({ + ...appSettings, + network: { ...appSettings.network, webui_port: parseInt(e.target.value) } + })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="5173" + /> +
+
+
+
+ )} + + {/* System Settings */} + {appSettings && ( +
+

System

+
+
+ + setAppSettings({ + ...appSettings, + misc: { ...appSettings.misc, debug_dir: e.target.value } + })} + className="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100" + placeholder="./data/debug_dir" + /> +

+ Directory for debug files +

+
+
+ setAppSettings({ + ...appSettings, + misc: { ...appSettings.misc, langfuse_enable_telemetry: e.target.checked } + })} + className="h-4 w-4 text-blue-600 focus:ring-blue-500 border-gray-300 rounded" + /> + +
+
+
+ )} + + {/* Save and Reset buttons */} +
+ + +
+
+ ) : ( +
+ +

+ Failed to load infrastructure settings +

+ +
+ )} +
+ )} + + {/* API Keys */} + {activeTab === 'api-keys' && ( +
+

+ External Service API Keys +

+ +
+

+ Note: API keys are stored securely and take effect immediately after saving. + Leave fields empty to keep existing keys unchanged. +

+
+ + {/* Load from File Section */} +
+

+ Load API Keys from File +

+
+ setApiKeysFilePath(e.target.value)} + className="flex-1 px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-700 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder=".env.api-keys" + /> + +
+

+ Load API keys from a file on the server. Default: .env.api-keys in project root. +

+
+ + {apiKeysSettingsLoading && !apiKeysSettings ? ( +
+ +

Loading API keys...

+
+ ) : apiKeysSettings ? ( +
+ {/* OpenAI */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, openai_api_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="sk-..." + /> + +
+

+ For GPT models and embeddings +

+
+ + {/* Deepgram */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, deepgram_api_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="Enter Deepgram API key" + /> + +
+

+ For speech-to-text transcription +

+
+ + {/* Mistral */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, mistral_api_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="Enter Mistral API key" + /> + +
+

+ For Mistral/Voxtral transcription +

+
+ + {/* HuggingFace */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, hf_token: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="hf_..." + /> + +
+

+ For accessing HuggingFace models +

+
+ + {/* Langfuse */} +
+

+ Langfuse (Observability) +

+
+
+ +
+ setApiKeysSettings({ ...apiKeysSettings, langfuse_public_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="pk-lf-..." + /> + +
+
+
+ +
+ setApiKeysSettings({ ...apiKeysSettings, langfuse_secret_key: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="sk-lf-..." + /> + +
+
+
+
+ + {/* Ngrok */} +
+ +
+ setApiKeysSettings({ ...apiKeysSettings, ngrok_authtoken: e.target.value })} + className="w-full px-3 py-2 pr-10 border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-800 text-gray-900 dark:text-gray-100 font-mono text-sm" + placeholder="Enter Ngrok auth token" + /> + +
+

+ For public URL tunneling +

+
+ + {/* Save Options */} +
+

+ Save Options +

+
+ + +
+

+ You can save to file only, database only, or both for redundancy. +

+
+ + {/* Save and Reset buttons */} +
+ + +
+
+ ) : ( +
+ +

+ Failed to load API keys +

+ +
+ )} +
+ )} + + {/* MCP Key */} + {activeTab === 'mcp-key' && ( +
+

+ MCP API Key +

+ +

+ Generate an API key for Model Context Protocol (MCP) clients like Claude Desktop, + Cursor, or Windsurf to access your conversations. +

+ + {apiKey ? ( +
+
+
+ + Current MCP API Key + + {apiKeyCreatedAt && ( + + Created: {formatDate(apiKeyCreatedAt)} + + )} +
+ +
+ + {apiKey} + + +
+ +
+

+ MCP Server URL:{' '} + + http://your-server:8000/mcp/conversations/sse + +
+ Authorization: Bearer {apiKey} +

+
+
+ +
+ + + +
+
+ ) : ( +
+ +

No MCP API key generated yet

+ +
+ )} +
+ )} + + {/* Memory Settings */} + {activeTab === 'memory' && appSettings && ( +
+

+ Memory Settings +

+ +
+ {renderSettingsField( + 'providers', + 'memory_provider', + appSettings.providers.memory_provider, + 'Memory Provider', + 'Choose where memories are stored and processed', + 'select', + [ + { value: 'chronicle', label: 'Chronicle (Default)' }, + { value: 'openmemory_mcp', label: 'OpenMemory MCP' }, + { value: 'mycelia', label: 'Mycelia' }, + ] + )} +
+ +
+ +
+
+ )} + + {/* Speech Settings */} + {activeTab === 'speech' && appSettings && ( +
+

+ Speech & Audio Settings +

+ +
+ {/* Transcription */} +
+

+ Transcription +

+
+ {renderSettingsField( + 'providers', + 'transcription_provider', + appSettings.providers.transcription_provider, + 'Transcription Service', + 'Choose which service to use for speech-to-text', + 'select', + [ + { value: 'auto', label: 'Auto-detect' }, + { value: 'deepgram', label: 'Deepgram' }, + { value: 'mistral', label: 'Mistral' }, + { value: 'parakeet', label: 'Parakeet (Local)' }, + ] + )} +
+
+ + {/* Speech Detection */} +
+

+ Speech Detection +

+
+ {renderSettingsField( + 'speech_detection', + 'min_words', + appSettings.speech_detection.min_words, + 'Minimum Words', + 'Minimum words required to create a conversation', + 'number' + )} + {renderSettingsField( + 'speech_detection', + 'min_confidence', + appSettings.speech_detection.min_confidence, + 'Minimum Confidence', + 'Word confidence threshold (0.0-1.0)', + 'number' + )} + {renderSettingsField( + 'speech_detection', + 'min_duration', + appSettings.speech_detection.min_duration, + 'Minimum Duration (seconds)', + 'Minimum speech duration in seconds', + 'number' + )} +
+
+ + {/* Diarization */} +
+

+ Speaker Diarization +

+
+ {renderSettingsField( + 'diarization', + 'diarization_source', + appSettings.diarization.diarization_source, + 'Diarization Source', + 'Service to use for speaker identification', + 'select', + [ + { value: 'pyannote', label: 'PyAnnote' }, + { value: 'deepgram', label: 'Deepgram' }, + ] + )} + {renderSettingsField( + 'diarization', + 'min_speakers', + appSettings.diarization.min_speakers, + 'Minimum Speakers', + 'Minimum number of speakers to detect', + 'number' + )} + {renderSettingsField( + 'diarization', + 'max_speakers', + appSettings.diarization.max_speakers, + 'Maximum Speakers', + 'Maximum number of speakers to detect', + 'number' + )} +
+
+ + {/* Audio Processing */} +
+

+ Audio Processing +

+
+ {renderSettingsField( + 'audio_processing', + 'audio_cropping_enabled', + appSettings.audio_processing.audio_cropping_enabled, + 'Enable Audio Cropping', + 'Automatically remove silence from audio', + 'boolean' + )} + {renderSettingsField( + 'audio_processing', + 'min_speech_segment_duration', + appSettings.audio_processing.min_speech_segment_duration, + 'Min Speech Segment Duration', + 'Minimum duration for speech segments (seconds)', + 'number' + )} + {renderSettingsField( + 'audio_processing', + 'cropping_context_padding', + appSettings.audio_processing.cropping_context_padding, + 'Context Padding', + 'Padding around speech segments (0.0-1.0)', + 'number' + )} +
+
+
+ +
+ +
+
+ )} + + {/* Conversations Settings */} + {activeTab === 'conversations' && appSettings && ( +
+

+ Conversation Settings +

+ +
+ {renderSettingsField( + 'conversation', + 'transcription_buffer_seconds', + appSettings.conversation.transcription_buffer_seconds, + 'Transcription Buffer (seconds)', + 'Trigger transcription every N seconds', + 'number' + )} + {renderSettingsField( + 'conversation', + 'speech_inactivity_threshold', + appSettings.conversation.speech_inactivity_threshold, + 'Speech Inactivity Threshold (seconds)', + 'Close conversation after N seconds of silence', + 'number' + )} + {renderSettingsField( + 'conversation', + 'new_conversation_timeout_minutes', + appSettings.conversation.new_conversation_timeout_minutes, + 'New Conversation Timeout (minutes)', + 'Timeout for creating new conversations', + 'number' + )} + {renderSettingsField( + 'conversation', + 'record_only_enrolled_speakers', + appSettings.conversation.record_only_enrolled_speakers, + 'Record Only Enrolled Speakers', + 'Only create conversations when enrolled speakers are detected', + 'boolean' + )} +
+ +
+ +
+
+ )} + + {/* LLM Settings */} + {activeTab === 'llm' && appSettings && ( +
+

+ LLM Configuration +

+ +
+ {/* Provider Selection */} +
+

+ Provider +

+
+ {renderSettingsField( + 'llm', + 'llm_provider', + appSettings.llm.llm_provider, + 'LLM Provider', + 'Language model provider for memory extraction and chat', + 'select', + [ + { value: 'openai', label: 'OpenAI' }, + { value: 'ollama', label: 'Ollama' }, + ] + )} +
+
+ + {/* OpenAI Settings */} + {appSettings.llm.llm_provider === 'openai' && ( +
+

+ OpenAI Settings +

+
+ {renderSettingsField( + 'llm', + 'openai_model', + appSettings.llm.openai_model, + 'OpenAI Model', + 'Model to use for general tasks', + 'text' + )} + {renderSettingsField( + 'llm', + 'chat_llm_model', + appSettings.llm.chat_llm_model || '', + 'Chat Model (Optional)', + 'Specific model for chat (defaults to OpenAI model if not set)', + 'text' + )} + {renderSettingsField( + 'llm', + 'chat_temperature', + appSettings.llm.chat_temperature, + 'Chat Temperature', + 'Temperature for chat responses (0.0-2.0)', + 'number' + )} +
+
+ )} + + {/* Ollama Settings */} + {appSettings.llm.llm_provider === 'ollama' && ( +
+

+ Ollama Settings +

+
+ {renderSettingsField( + 'llm', + 'ollama_model', + appSettings.llm.ollama_model || '', + 'Ollama Model', + 'Model name for Ollama', + 'text' + )} + {renderSettingsField( + 'llm', + 'ollama_embedder_model', + appSettings.llm.ollama_embedder_model || '', + 'Ollama Embedder Model', + 'Embedder model name for Ollama', + 'text' + )} +
+
+ )} +
+ +
+ +
+
+ )} + + {/* Loading state for settings tabs */} + {['memory', 'llm', 'speech', 'conversations'].includes(activeTab) && + appSettingsLoading && + !appSettings && ( +
+ +

Loading settings...

+
+ )} +
+
+ ) +} diff --git a/backends/advanced/webui/src/services/api.ts b/backends/advanced/webui/src/services/api.ts index 0d988a9d..323bd369 100644 --- a/backends/advanced/webui/src/services/api.ts +++ b/backends/advanced/webui/src/services/api.ts @@ -258,14 +258,76 @@ export const chatApi = { export const speakerApi = { // Get current user's speaker configuration getSpeakerConfiguration: () => api.get('/api/speaker-configuration'), - + // Update current user's speaker configuration - updateSpeakerConfiguration: (primarySpeakers: Array<{speaker_id: string, name: string, user_id: number}>) => + updateSpeakerConfiguration: (primarySpeakers: Array<{speaker_id: string, name: string, user_id: number}>) => api.post('/api/speaker-configuration', primarySpeakers), - - // Get enrolled speakers from speaker recognition service + + // Get enrolled speakers from speaker recognition service getEnrolledSpeakers: () => api.get('/api/enrolled-speakers'), - + // Check speaker service status (admin only) getSpeakerServiceStatus: () => api.get('/api/speaker-service-status'), +} + +export const settingsApi = { + // Generate new API key for current user + generateApiKey: () => api.post('/api/users/me/api-key'), + + // Revoke current user's API key + revokeApiKey: () => api.delete('/api/users/me/api-key'), + + // Application settings (requires admin) + getAllSettings: () => api.get('/api/settings'), + updateAllSettings: (settings: any) => api.put('/api/settings', settings), + + // Individual setting categories + getSpeechDetection: () => api.get('/api/settings/speech-detection'), + updateSpeechDetection: (settings: any) => api.put('/api/settings/speech-detection', settings), + + getConversation: () => api.get('/api/settings/conversation'), + updateConversation: (settings: any) => api.put('/api/settings/conversation', settings), + + getAudioProcessing: () => api.get('/api/settings/audio-processing'), + updateAudioProcessing: (settings: any) => api.put('/api/settings/audio-processing', 
settings), + + getDiarization: () => api.get('/api/settings/diarization'), + updateDiarization: (settings: any) => api.put('/api/settings/diarization', settings), + + getLLM: () => api.get('/api/settings/llm'), + updateLLM: (settings: any) => api.put('/api/settings/llm', settings), + + getProviders: () => api.get('/api/settings/providers'), + updateProviders: (settings: any) => api.put('/api/settings/providers', settings), + + getNetwork: () => api.get('/api/settings/network'), + updateNetwork: (settings: any) => api.put('/api/settings/network', settings), + + getInfrastructure: () => api.get('/api/settings/infrastructure'), + updateInfrastructure: (settings: any) => api.put('/api/settings/infrastructure', settings), + + getMisc: () => api.get('/api/settings/misc'), + updateMisc: (settings: any) => api.put('/api/settings/misc', settings), + + getApiKeys: () => api.get('/api/settings/api-keys'), + updateApiKeys: (settings: any) => api.put('/api/settings/api-keys', settings), + saveApiKeys: (settings: any, saveToFile: boolean = true, saveToDatabase: boolean = true) => + api.post('/api/settings/api-keys/save', settings, { + params: { save_to_file: saveToFile, save_to_database: saveToDatabase } + }), + loadApiKeysFromFile: (filePath: string = '.env.api-keys') => + api.get('/api/settings/api-keys/load-from-file', { + params: { file_path: filePath } + }), + + // Cache management + invalidateCache: (category?: string) => api.post('/api/settings/cache/invalidate', null, { + params: category ? 
{ category } : {} + }), + + // Infrastructure status + getInfrastructureStatus: () => api.get('/api/settings/infrastructure/status'), + + // API keys status + getApiKeysStatus: () => api.get('/api/settings/api-keys/status'), } \ No newline at end of file diff --git a/tests/setup/test_env.py b/tests/setup/test_env.py index 929e83e2..7e3ca983 100644 --- a/tests/setup/test_env.py +++ b/tests/setup/test_env.py @@ -1,25 +1,26 @@ # Test Environment Configuration import os from pathlib import Path +from dotenv import load_dotenv # Load .env file from backends/advanced directory if it exists # This allows tests to work when run from VSCode or command line -def load_env_file(): - """Load environment variables from .env file if it exists.""" - # Look for .env in backends/advanced directory - env_file = Path(__file__).parent.parent.parent / "backends" / "advanced" / ".env" - if env_file.exists(): - with open(env_file) as f: - for line in f: - line = line.strip() - if line and not line.startswith('#') and '=' in line: - key, value = line.split('=', 1) - # Only set if not already in environment (CI takes precedence) - if key not in os.environ: - os.environ[key] = value +# def load_env_file(): +# """Load environment variables from .env file if it exists.""" +# # Look for .env in backends/advanced directory +# env_file = Path(__file__).parent.parent.parent / "backends" / "advanced" / ".env" +# if env_file.exists(): +# with open(env_file) as f: +# for line in f: +# line = line.strip() +# if line and not line.startswith('#') and '=' in line: +# key, value = line.split('=', 1) +# # Only set if not already in environment (CI takes precedence) +# if key not in os.environ: +# os.environ[key] = value # Load .env file (CI environment variables take precedence) -load_env_file() +# load_env_file() # Load .env from backends/advanced directory to get COMPOSE_PROJECT_NAME backend_env_path = Path(__file__).resolve().parents[2] / "backends" / "advanced" / ".env"